ZTWHHH commited on
Commit
e49c6a8
·
verified ·
1 Parent(s): cd31685

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/all.h +23 -0
  3. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/arg.h +23 -0
  4. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/autograd.h +5 -0
  5. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/cuda.h +30 -0
  6. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/imethod.h +53 -0
  7. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/jit.h +36 -0
  8. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nested.h +95 -0
  9. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn.h +10 -0
  10. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adagrad.h +109 -0
  11. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adam.h +92 -0
  12. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/lbfgs.h +103 -0
  13. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/optimizer.h +217 -0
  14. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/rmsprop.h +95 -0
  15. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/serialize.h +309 -0
  16. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/ordered_dict.h +516 -0
  17. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/special.h +1405 -0
  18. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/types.h +65 -0
  19. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/utils.h +116 -0
  20. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/version.h +14 -0
  21. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h +322 -0
  22. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/attributes.h +184 -0
  23. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/constants.h +61 -0
  24. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h +201 -0
  25. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_utils.h +25 -0
  26. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h +1841 -0
  27. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir_views.h +164 -0
  28. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/irparser.h +40 -0
  29. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/named_value.h +84 -0
  30. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/node_hashing.h +17 -0
  31. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/scope.h +220 -0
  32. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/type_hashing.h +20 -0
  33. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h +24 -0
  34. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h +11 -0
  35. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h +13 -0
  36. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h +17 -0
  37. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h +19 -0
  38. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h +15 -0
  39. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h +12 -0
  40. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h +14 -0
  41. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h +14 -0
  42. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h +75 -0
  43. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h +511 -0
  44. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry_util.h +12 -0
  45. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/instruction.h +100 -0
  46. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_trace.h +8 -0
  47. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/register_ops_utils.h +885 -0
  48. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/variable_tensor_list.h +17 -0
  49. vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/LICENSE +466 -0
  50. vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/config.cuh +50 -0
.gitattributes CHANGED
@@ -1707,3 +1707,4 @@ vllm/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpytho
1707
  valley/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
1708
  vllm/lib/python3.10/site-packages/cupy/_util.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1709
  vllm/lib/python3.10/site-packages/cupy/cuda/common.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
1707
  valley/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
1708
  vllm/lib/python3.10/site-packages/cupy/_util.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1709
  vllm/lib/python3.10/site-packages/cupy/cuda/common.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1710
+ vllm/lib/python3.10/site-packages/cupy/cuda/pinned_memory.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/all.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #if !defined(_MSC_VER) && __cplusplus < 201703L
4
+ #error C++17 or later compatible compiler is required to use PyTorch.
5
+ #endif
6
+
7
+ #include <torch/autograd.h>
8
+ #include <torch/cuda.h>
9
+ #include <torch/data.h>
10
+ #include <torch/enum.h>
11
+ #include <torch/fft.h>
12
+ #include <torch/jit.h>
13
+ #include <torch/linalg.h>
14
+ #include <torch/mps.h>
15
+ #include <torch/nested.h>
16
+ #include <torch/nn.h>
17
+ #include <torch/optim.h>
18
+ #include <torch/serialize.h>
19
+ #include <torch/sparse.h>
20
+ #include <torch/special.h>
21
+ #include <torch/types.h>
22
+ #include <torch/utils.h>
23
+ #include <torch/version.h>
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/arg.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <utility>
4
+
5
+ #define TORCH_ARG(T, name) \
6
+ public: \
7
+ inline auto name(const T& new_##name)->decltype(*this) { /* NOLINT */ \
8
+ this->name##_ = new_##name; \
9
+ return *this; \
10
+ } \
11
+ inline auto name(T&& new_##name)->decltype(*this) { /* NOLINT */ \
12
+ this->name##_ = std::move(new_##name); \
13
+ return *this; \
14
+ } \
15
+ inline const T& name() const noexcept { /* NOLINT */ \
16
+ return this->name##_; \
17
+ } \
18
+ inline T& name() noexcept { /* NOLINT */ \
19
+ return this->name##_; \
20
+ } \
21
+ \
22
+ private: \
23
+ T name##_ /* NOLINT */
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/autograd.h ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/autograd.h>
4
+ #include <torch/csrc/autograd/autograd_not_implemented_fallback.h>
5
+ #include <torch/csrc/autograd/custom_function.h>
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/cuda.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+
5
+ #include <cstddef>
6
+ #include <cstdint>
7
+
8
+ namespace torch {
9
+ namespace cuda {
10
+
11
+ /// Returns the number of CUDA devices available.
12
+ size_t TORCH_API device_count();
13
+
14
+ /// Returns true if at least one CUDA device is available.
15
+ bool TORCH_API is_available();
16
+
17
+ /// Returns true if CUDA is available, and CuDNN is available.
18
+ bool TORCH_API cudnn_is_available();
19
+
20
+ /// Sets the seed for the current GPU.
21
+ void TORCH_API manual_seed(uint64_t seed);
22
+
23
+ /// Sets the seed for all available GPUs.
24
+ void TORCH_API manual_seed_all(uint64_t seed);
25
+
26
+ /// Waits for all kernels in all streams on a CUDA device to complete.
27
+ void TORCH_API synchronize(int64_t device_index = -1);
28
+
29
+ } // namespace cuda
30
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/imethod.h ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+ #include <vector>
4
+
5
+ namespace torch {
6
+
7
+ class TORCH_API IMethod {
8
+ /*
9
+ IMethod provides a portable interface for torch methods, whether
10
+ they are backed by torchscript or python/deploy.
11
+
12
+ This is helpful since torchscript methods provide additional information
13
+ (e.g. FunctionSchema, Graph) which aren't available in pure python methods.
14
+
15
+ Higher level APIs should prefer depending on this interface rather
16
+ than a specific implementation of it, to promote portability and reuse, and
17
+ avoid unintentional dependencies on e.g. script methods.
18
+
19
+ Note: This API is experimental, and may evolve.
20
+ */
21
+ public:
22
+ using IValueList = std::vector<c10::IValue>;
23
+ using IValueMap = std::unordered_map<std::string, at::IValue>;
24
+
25
+ IMethod() = default;
26
+ IMethod(const IMethod&) = default;
27
+ IMethod& operator=(const IMethod&) = default;
28
+ IMethod(IMethod&&) noexcept = default;
29
+ IMethod& operator=(IMethod&&) noexcept = default;
30
+ virtual ~IMethod() = default;
31
+
32
+ virtual c10::IValue operator()(
33
+ std::vector<c10::IValue> args,
34
+ const IValueMap& kwargs = IValueMap()) const = 0;
35
+
36
+ virtual const std::string& name() const = 0;
37
+
38
+ // Returns an ordered list of argument names, possible in both
39
+ // script and python methods. This is a more portable dependency
40
+ // than a ScriptMethod FunctionSchema, which has more information
41
+ // than can be generally expected from a python method.
42
+ const std::vector<std::string>& getArgumentNames() const;
43
+
44
+ protected:
45
+ virtual void setArgumentNames(
46
+ std::vector<std::string>& argumentNames) const = 0;
47
+
48
+ private:
49
+ mutable bool isArgumentNamesInitialized_{false};
50
+ mutable std::vector<std::string> argumentNames_;
51
+ };
52
+
53
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/jit.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/api/module.h>
5
+
6
+ #include <memory>
7
+ #include <string>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
+ /// Compiles script code into an executable graph.
13
+ ///
14
+ /// Takes a string containing functions in script syntax and compiles them into
15
+ /// a module (graph). The returned module provides a `run_method` function
16
+ /// that may be used to invoke the compiled functions.
17
+ ///
18
+ /// For example:
19
+ /// \rst
20
+ /// .. code-block:: cpp
21
+ ///
22
+ /// auto module = torch::jit::compile(R"JIT(
23
+ /// def relu_script(a, b):
24
+ /// return torch.relu(a + b)
25
+ /// def test_while(a, i):
26
+ /// while i < 10:
27
+ /// a += a
28
+ /// i += 1
29
+ /// return a
30
+ /// )JIT");
31
+ /// IValue output = module->run_method("relu_script", a, b);
32
+ /// \endrst
33
+ TORCH_API std::shared_ptr<CompilationUnit> compile(const std::string& source);
34
+
35
+ } // namespace jit
36
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nested.h ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ATen_fwd.h>
5
+ #include <torch/csrc/api/include/torch/detail/TensorDataContainer.h>
6
+ #include <algorithm>
7
+
8
+ namespace torch {
9
+ namespace nested {
10
+
11
+ /// Nested tensor
12
+ ///
13
+ /// See
14
+ /// https://pytorch.org/docs/master/nested.html#torch.nested.nested_tensor
15
+ ///
16
+ /// ```
17
+ // implemented on python object to allow torch.nested.nested_tensor to be
18
+ // constructed with arbitrarily nested python objects - for now, only arbitrary
19
+ // python lists and lists of Tensors
20
+ // See torch/csrc/autograd/python_nested_functions_manual.cpp for Python
21
+ // implementation
22
+ // See here for C++ implementation
23
+ inline at::Tensor nested_tensor(
24
+ at::TensorList nested_tensor_data,
25
+ const at::TensorOptions& options = {}) {
26
+ auto out = at::_nested_tensor_from_tensor_list(
27
+ nested_tensor_data,
28
+ c10::typeMetaToScalarType(options.dtype()),
29
+ c10::nullopt,
30
+ options.device(),
31
+ options.pinned_memory());
32
+ if (options.has_requires_grad() && options.requires_grad()) {
33
+ out.requires_grad_(true);
34
+ }
35
+ return out;
36
+ }
37
+
38
+ inline at::Tensor nested_tensor(
39
+ at::ArrayRef<detail::TensorDataContainer> nested_tensor_data,
40
+ const at::TensorOptions& options = {}) {
41
+ for (const auto& tdc : nested_tensor_data) {
42
+ TORCH_CHECK(
43
+ tdc.is_init_list(),
44
+ "nested_tensor() not implemented for these parameters");
45
+ }
46
+ // Construct a TensorList using nested_tensor_data
47
+ std::vector<at::Tensor> tensor_list(nested_tensor_data.size());
48
+ std::transform(
49
+ nested_tensor_data.begin(),
50
+ nested_tensor_data.end(),
51
+ tensor_list.begin(),
52
+ [&](const detail::TensorDataContainer& tdc) {
53
+ return tdc.convert_to_tensor(options);
54
+ });
55
+ auto out = at::_nested_tensor_from_tensor_list(
56
+ tensor_list,
57
+ c10::typeMetaToScalarType(options.dtype()),
58
+ c10::nullopt,
59
+ options.device(),
60
+ options.pinned_memory());
61
+ if (options.has_requires_grad() && options.requires_grad()) {
62
+ out.requires_grad_(true);
63
+ }
64
+ return out;
65
+ }
66
+
67
+ /// As Nested Tensor
68
+ ///
69
+ /// See
70
+ /// https://pytorch.org/docs/master/nested.html#torch.nested.as_nested_tensor
71
+ ///
72
+ /// ```
73
+ inline at::Tensor as_nested_tensor(
74
+ at::TensorList list,
75
+ c10::optional<at::ScalarType> dtype = c10::nullopt,
76
+ c10::optional<at::Device> device = c10::nullopt) {
77
+ return at::_nested_tensor_from_tensor_list(
78
+ list, dtype, c10::nullopt, device, c10::nullopt);
79
+ }
80
+
81
+ /// Nested to padded tensor
82
+ ///
83
+ /// See
84
+ /// https://pytorch.org/docs/master/nested.html#torch.nested.to_padded_tensor
85
+ ///
86
+ /// ```
87
+ inline at::Tensor to_padded_tensor(
88
+ const at::Tensor& self,
89
+ double padding,
90
+ at::OptionalIntArrayRef output_size = c10::nullopt) {
91
+ return at::nested_to_padded_tensor(self, padding, output_size);
92
+ }
93
+
94
+ } // namespace nested
95
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn.h ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/nn/cloneable.h>
4
+ #include <torch/nn/functional.h>
5
+ #include <torch/nn/init.h>
6
+ #include <torch/nn/module.h>
7
+ #include <torch/nn/modules.h>
8
+ #include <torch/nn/options.h>
9
+ #include <torch/nn/pimpl.h>
10
+ #include <torch/nn/utils.h>
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adagrad.h ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/nn/pimpl.h>
4
+ #include <torch/optim/optimizer.h>
5
+ #include <torch/optim/serialize.h>
6
+ #include <torch/serialize/archive.h>
7
+ #include <torch/types.h>
8
+
9
+ #include <utility>
10
+ #include <vector>
11
+
12
+ namespace torch {
13
+ namespace serialize {
14
+ class OutputArchive;
15
+ class InputArchive;
16
+ } // namespace serialize
17
+ } // namespace torch
18
+
19
+ namespace torch {
20
+ namespace optim {
21
+
22
+ struct TORCH_API AdagradOptions
23
+ : public OptimizerCloneableOptions<AdagradOptions> {
24
+ AdagradOptions(double lr = 1e-2);
25
+ TORCH_ARG(double, lr) = 1e-2;
26
+ TORCH_ARG(double, lr_decay) = 0;
27
+ TORCH_ARG(double, weight_decay) = 0;
28
+ TORCH_ARG(double, initial_accumulator_value) = 0;
29
+ TORCH_ARG(double, eps) = 1e-10;
30
+
31
+ public:
32
+ void serialize(torch::serialize::InputArchive& archive) override;
33
+ void serialize(torch::serialize::OutputArchive& archive) const override;
34
+ TORCH_API friend bool operator==(
35
+ const AdagradOptions& lhs,
36
+ const AdagradOptions& rhs);
37
+ double get_lr() const override;
38
+ void set_lr(const double lr) override;
39
+ };
40
+
41
+ struct TORCH_API AdagradParamState
42
+ : public OptimizerCloneableParamState<AdagradParamState> {
43
+ TORCH_ARG(torch::Tensor, sum);
44
+ TORCH_ARG(int64_t, step) = 0;
45
+
46
+ public:
47
+ AdagradParamState() = default;
48
+ AdagradParamState(const AdagradParamState&) = default;
49
+ AdagradParamState& operator=(const AdagradParamState&) = default;
50
+ AdagradParamState(AdagradParamState&&) noexcept = default;
51
+ AdagradParamState& operator=(AdagradParamState&&) noexcept = default;
52
+ void serialize(torch::serialize::InputArchive& archive) override;
53
+ void serialize(torch::serialize::OutputArchive& archive) const override;
54
+ TORCH_API friend bool operator==(
55
+ const AdagradParamState& lhs,
56
+ const AdagradParamState& rhs);
57
+ };
58
+
59
+ class TORCH_API Adagrad : public Optimizer {
60
+ public:
61
+ explicit Adagrad(
62
+ std::vector<OptimizerParamGroup> param_groups,
63
+ AdagradOptions defaults = {})
64
+ : Optimizer(
65
+ std::move(param_groups),
66
+ std::make_unique<AdagradOptions>(defaults)) {
67
+ TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
68
+ TORCH_CHECK(
69
+ defaults.lr_decay() >= 0,
70
+ "Invalid lr_decay value: ",
71
+ defaults.lr_decay());
72
+ TORCH_CHECK(
73
+ defaults.weight_decay() >= 0,
74
+ "Invalid weight_decay value: ",
75
+ defaults.weight_decay());
76
+ TORCH_CHECK(
77
+ defaults.initial_accumulator_value() >= 0,
78
+ "Invalid initial_accumulator_value value: ",
79
+ defaults.initial_accumulator_value());
80
+ TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps());
81
+
82
+ for (const auto& group : param_groups_) {
83
+ for (const auto& p : group.params()) {
84
+ auto state = std::make_unique<AdagradParamState>();
85
+ state->step(0);
86
+ state->sum(torch::full_like(
87
+ p.data(),
88
+ defaults.initial_accumulator_value(),
89
+ at::MemoryFormat::Preserve));
90
+ state_[p.unsafeGetTensorImpl()] = std::move(state);
91
+ }
92
+ }
93
+ }
94
+
95
+ explicit Adagrad(std::vector<Tensor> params, AdagradOptions defaults = {})
96
+ : Adagrad({OptimizerParamGroup(std::move(params))}, defaults) {}
97
+
98
+ torch::Tensor step(LossClosure closure = nullptr) override;
99
+ void save(serialize::OutputArchive& archive) const override;
100
+ void load(serialize::InputArchive& archive) override;
101
+
102
+ private:
103
+ template <typename Self, typename Archive>
104
+ static void serialize(Self& self, Archive& archive) {
105
+ _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(Adagrad);
106
+ }
107
+ };
108
+ } // namespace optim
109
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adam.h ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/nn/module.h>
4
+ #include <torch/optim/optimizer.h>
5
+ #include <torch/optim/serialize.h>
6
+
7
+ #include <utility>
8
+ #include <vector>
9
+
10
+ namespace torch {
11
+ namespace serialize {
12
+ class OutputArchive;
13
+ class InputArchive;
14
+ } // namespace serialize
15
+ } // namespace torch
16
+
17
+ namespace torch {
18
+ namespace optim {
19
+
20
+ struct TORCH_API AdamOptions : public OptimizerCloneableOptions<AdamOptions> {
21
+ AdamOptions(double lr = 1e-3);
22
+ TORCH_ARG(double, lr) = 1e-3;
23
+ typedef std::tuple<double, double> betas_t;
24
+ TORCH_ARG(betas_t, betas) = std::make_tuple(0.9, 0.999);
25
+ TORCH_ARG(double, eps) = 1e-8;
26
+ TORCH_ARG(double, weight_decay) = 0;
27
+ TORCH_ARG(bool, amsgrad) = false;
28
+
29
+ public:
30
+ void serialize(torch::serialize::InputArchive& archive) override;
31
+ void serialize(torch::serialize::OutputArchive& archive) const override;
32
+ TORCH_API friend bool operator==(
33
+ const AdamOptions& lhs,
34
+ const AdamOptions& rhs);
35
+ double get_lr() const override;
36
+ void set_lr(const double lr) override;
37
+ };
38
+
39
+ struct TORCH_API AdamParamState
40
+ : public OptimizerCloneableParamState<AdamParamState> {
41
+ TORCH_ARG(int64_t, step) = 0;
42
+ TORCH_ARG(torch::Tensor, exp_avg);
43
+ TORCH_ARG(torch::Tensor, exp_avg_sq);
44
+ TORCH_ARG(torch::Tensor, max_exp_avg_sq) = {};
45
+
46
+ public:
47
+ void serialize(torch::serialize::InputArchive& archive) override;
48
+ void serialize(torch::serialize::OutputArchive& archive) const override;
49
+ TORCH_API friend bool operator==(
50
+ const AdamParamState& lhs,
51
+ const AdamParamState& rhs);
52
+ };
53
+
54
+ class TORCH_API Adam : public Optimizer {
55
+ public:
56
+ explicit Adam(
57
+ std::vector<OptimizerParamGroup> param_groups,
58
+ AdamOptions defaults = {})
59
+ : Optimizer(
60
+ std::move(param_groups),
61
+ std::make_unique<AdamOptions>(defaults)) {
62
+ TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
63
+ TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps());
64
+ auto betas = defaults.betas();
65
+ TORCH_CHECK(
66
+ 0 <= std::get<0>(betas) && std::get<0>(betas) < 1.0,
67
+ "Invalid beta parameter at index 0: ",
68
+ std::get<0>(betas));
69
+ TORCH_CHECK(
70
+ 0 <= std::get<1>(betas) && std::get<1>(betas) < 1.0,
71
+ "Invalid beta parameter at index 1: ",
72
+ std::get<1>(betas));
73
+ TORCH_CHECK(
74
+ defaults.weight_decay() >= 0,
75
+ "Invalid weight_decay value: ",
76
+ defaults.weight_decay());
77
+ }
78
+ explicit Adam(std::vector<Tensor> params, AdamOptions defaults = {})
79
+ : Adam({OptimizerParamGroup(std::move(params))}, defaults) {}
80
+
81
+ torch::Tensor step(LossClosure closure = nullptr) override;
82
+ void save(serialize::OutputArchive& archive) const override;
83
+ void load(serialize::InputArchive& archive) override;
84
+
85
+ private:
86
+ template <typename Self, typename Archive>
87
+ static void serialize(Self& self, Archive& archive) {
88
+ _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(Adam);
89
+ }
90
+ };
91
+ } // namespace optim
92
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/lbfgs.h ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/nn/module.h>
4
+ #include <torch/optim/optimizer.h>
5
+ #include <torch/optim/serialize.h>
6
+ #include <torch/serialize/archive.h>
7
+
8
+ #include <deque>
9
+ #include <functional>
10
+ #include <memory>
11
+ #include <vector>
12
+
13
+ namespace torch {
14
+ namespace optim {
15
+
16
+ struct TORCH_API LBFGSOptions : public OptimizerCloneableOptions<LBFGSOptions> {
17
+ LBFGSOptions(double lr = 1);
18
+ TORCH_ARG(double, lr) = 1;
19
+ TORCH_ARG(int64_t, max_iter) = 20;
20
+ TORCH_ARG(c10::optional<int64_t>, max_eval) = c10::nullopt;
21
+ TORCH_ARG(double, tolerance_grad) = 1e-7;
22
+ TORCH_ARG(double, tolerance_change) = 1e-9;
23
+ TORCH_ARG(int64_t, history_size) = 100;
24
+ TORCH_ARG(c10::optional<std::string>, line_search_fn) = c10::nullopt;
25
+
26
+ public:
27
+ void serialize(torch::serialize::InputArchive& archive) override;
28
+ void serialize(torch::serialize::OutputArchive& archive) const override;
29
+ TORCH_API friend bool operator==(
30
+ const LBFGSOptions& lhs,
31
+ const LBFGSOptions& rhs);
32
+ double get_lr() const override;
33
+ void set_lr(const double lr) override;
34
+ };
35
+
36
+ struct TORCH_API LBFGSParamState
37
+ : public OptimizerCloneableParamState<LBFGSParamState> {
38
+ TORCH_ARG(int64_t, func_evals) = 0;
39
+ TORCH_ARG(int64_t, n_iter) = 0;
40
+ TORCH_ARG(double, t) = 0;
41
+ TORCH_ARG(double, prev_loss) = 0;
42
+ TORCH_ARG(Tensor, d) = {};
43
+ TORCH_ARG(Tensor, H_diag) = {};
44
+ TORCH_ARG(Tensor, prev_flat_grad) = {};
45
+ TORCH_ARG(std::deque<Tensor>, old_dirs);
46
+ TORCH_ARG(std::deque<Tensor>, old_stps);
47
+ TORCH_ARG(std::deque<Tensor>, ro);
48
+ TORCH_ARG(c10::optional<std::vector<Tensor>>, al) = c10::nullopt;
49
+
50
+ public:
51
+ void serialize(torch::serialize::InputArchive& archive) override;
52
+ void serialize(torch::serialize::OutputArchive& archive) const override;
53
+ TORCH_API friend bool operator==(
54
+ const LBFGSParamState& lhs,
55
+ const LBFGSParamState& rhs);
56
+ };
57
+
58
+ class TORCH_API LBFGS : public Optimizer {
59
+ public:
60
+ explicit LBFGS(
61
+ std::vector<OptimizerParamGroup> param_groups,
62
+ LBFGSOptions defaults = {})
63
+ : Optimizer(
64
+ std::move(param_groups),
65
+ std::make_unique<LBFGSOptions>(defaults)) {
66
+ TORCH_CHECK(
67
+ param_groups_.size() == 1,
68
+ "LBFGS doesn't support per-parameter options (parameter groups)");
69
+ if (defaults.max_eval() == c10::nullopt) {
70
+ auto max_eval_val = (defaults.max_iter() * 5) / 4;
71
+ static_cast<LBFGSOptions&>(param_groups_[0].options())
72
+ .max_eval(max_eval_val);
73
+ static_cast<LBFGSOptions&>(*defaults_.get()).max_eval(max_eval_val);
74
+ }
75
+ _numel_cache = c10::nullopt;
76
+ }
77
+ explicit LBFGS(std::vector<Tensor> params, LBFGSOptions defaults = {})
78
+ : LBFGS({OptimizerParamGroup(std::move(params))}, defaults) {}
79
+
80
+ Tensor step(LossClosure closure) override;
81
+ void save(serialize::OutputArchive& archive) const override;
82
+ void load(serialize::InputArchive& archive) override;
83
+
84
+ private:
85
+ c10::optional<int64_t> _numel_cache;
86
+ int64_t _numel();
87
+ Tensor _gather_flat_grad();
88
+ void _add_grad(const double step_size, const Tensor& update);
89
+ std::tuple<double, Tensor> _directional_evaluate(
90
+ const LossClosure& closure,
91
+ const std::vector<Tensor>& x,
92
+ double t,
93
+ const Tensor& d);
94
+ void _set_param(const std::vector<Tensor>& params_data);
95
+ std::vector<Tensor> _clone_param();
96
+
97
+ template <typename Self, typename Archive>
98
+ static void serialize(Self& self, Archive& archive) {
99
+ _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(LBFGS);
100
+ }
101
+ };
102
+ } // namespace optim
103
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/optimizer.h ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/util/Exception.h>
5
+ #include <c10/util/flat_hash_map.h>
6
+
7
+ #include <torch/arg.h>
8
+ #include <torch/csrc/Export.h>
9
+
10
+ #include <algorithm>
11
+ #include <functional>
12
+ #include <iterator>
13
+ #include <memory>
14
+ #include <string>
15
+ #include <vector>
16
+
17
+ // Forward declarations confuse Doxygen
18
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
19
+ namespace at {
20
+ class Tensor;
21
+ } // namespace at
22
+
23
+ namespace torch {
24
+ using at::Tensor;
25
+ namespace serialize {
26
+ class OutputArchive;
27
+ class InputArchive;
28
+ } // namespace serialize
29
+ } // namespace torch
30
+ #endif // DOXYGEN_SHOULD_SKIP_THIS
31
+
32
+ namespace torch {
33
+ namespace optim {
34
+
35
+ class TORCH_API OptimizerParamState {
36
+ public:
37
+ OptimizerParamState() = default;
38
+ OptimizerParamState(const OptimizerParamState&) = default;
39
+ OptimizerParamState& operator=(const OptimizerParamState&) = default;
40
+ OptimizerParamState(OptimizerParamState&&) noexcept = default;
41
+ OptimizerParamState& operator=(OptimizerParamState&&) noexcept = default;
42
+ virtual std::unique_ptr<OptimizerParamState> clone() const;
43
+ virtual void serialize(torch::serialize::InputArchive& archive);
44
+ virtual void serialize(torch::serialize::OutputArchive& archive) const;
45
+ virtual ~OptimizerParamState() = default;
46
+ };
47
+
48
+ template <typename Derived>
49
+ class OptimizerCloneableParamState : public OptimizerParamState {
50
+ std::unique_ptr<OptimizerParamState> clone() const override {
51
+ return std::make_unique<Derived>(static_cast<const Derived&>(*this));
52
+ }
53
+ };
54
+
55
+ class TORCH_API OptimizerOptions {
56
+ public:
57
+ OptimizerOptions() = default;
58
+ OptimizerOptions(const OptimizerOptions&) = default;
59
+ OptimizerOptions& operator=(const OptimizerOptions&) = default;
60
+ OptimizerOptions(OptimizerOptions&&) noexcept = default;
61
+ OptimizerOptions& operator=(OptimizerOptions&&) noexcept = default;
62
+ virtual std::unique_ptr<OptimizerOptions> clone() const;
63
+ virtual void serialize(torch::serialize::InputArchive& archive);
64
+ virtual void serialize(torch::serialize::OutputArchive& archive) const;
65
+ virtual ~OptimizerOptions() = default;
66
+ virtual double get_lr() const;
67
+ virtual void set_lr(const double lr);
68
+ };
69
+
70
+ template <typename Derived>
71
+ class OptimizerCloneableOptions : public OptimizerOptions {
72
+ private:
73
+ std::unique_ptr<OptimizerOptions> clone() const override {
74
+ return std::make_unique<Derived>(static_cast<const Derived&>(*this));
75
+ }
76
+ };
77
+
78
+ /// Stores parameters in the param_group and stores a pointer to the
79
+ /// OptimizerOptions
80
+ class TORCH_API OptimizerParamGroup {
81
+ public:
82
+ // NOTE: In order to store `OptimizerParamGroup` in a `std::vector`, it has to
83
+ // be copy-constructible.
84
+ OptimizerParamGroup(const OptimizerParamGroup& param_group)
85
+ : params_(param_group.params()),
86
+ options_(
87
+ param_group.has_options() ? param_group.options().clone()
88
+ : nullptr) {}
89
+ OptimizerParamGroup(std::vector<Tensor> params)
90
+ : params_(std::move(params)) {}
91
+ OptimizerParamGroup(
92
+ std::vector<Tensor> params,
93
+ std::unique_ptr<OptimizerOptions> options)
94
+ : params_(std::move(params)), options_(std::move(options)) {}
95
+
96
+ bool has_options() const;
97
+ OptimizerOptions& options();
98
+ const OptimizerOptions& options() const;
99
+ void set_options(std::unique_ptr<OptimizerOptions> options);
100
+ std::vector<Tensor>& params();
101
+ const std::vector<Tensor>& params() const;
102
+
103
+ protected:
104
+ std::vector<Tensor> params_;
105
+ std::unique_ptr<OptimizerOptions> options_;
106
+ };
107
+
108
+ class TORCH_API Optimizer {
109
+ public:
110
+ // The copy constructor is deleted, because the user should use the
111
+ // `state_dict` / `load_state_dict` API to copy an optimizer instead.
112
+ Optimizer(const Optimizer& optimizer) = delete;
113
+ Optimizer(Optimizer&& optimizer) = default;
114
+
115
+ explicit Optimizer(
116
+ std::vector<OptimizerParamGroup> param_groups,
117
+ std::unique_ptr<OptimizerOptions> defaults)
118
+ : defaults_(std::move(defaults)) {
119
+ for (const auto& param_group : param_groups) {
120
+ add_param_group(param_group);
121
+ }
122
+ }
123
+
124
+ /// Constructs the `Optimizer` from a vector of parameters.
125
+ explicit Optimizer(
126
+ std::vector<Tensor> parameters,
127
+ std::unique_ptr<OptimizerOptions> defaults)
128
+ : Optimizer(
129
+ {OptimizerParamGroup(std::move(parameters))},
130
+ std::move(defaults)){};
131
+
132
+ /// Adds the given param_group to the optimizer's param_group list.
133
+ void add_param_group(const OptimizerParamGroup& param_group);
134
+
135
+ virtual ~Optimizer() = default;
136
+
137
+ using LossClosure = std::function<Tensor()>;
138
+ /// A loss function closure, which is expected to return the loss value.
139
+ virtual Tensor step(LossClosure closure = nullptr) = 0;
140
+
141
+ /// Adds the given vector of parameters to the optimizer's parameter list.
142
+ void add_parameters(const std::vector<Tensor>& parameters);
143
+
144
+ /// Zeros out the gradients of all parameters.
145
+ void zero_grad(bool set_to_none = true);
146
+
147
+ /// Provides a const reference to the parameters in the first param_group this
148
+ /// optimizer holds.
149
+ const std::vector<Tensor>& parameters() const noexcept;
150
+
151
+ /// Provides a reference to the parameters in the first param_group this
152
+ /// optimizer holds.
153
+ std::vector<Tensor>& parameters() noexcept;
154
+
155
+ /// Returns the number of parameters referenced by the optimizer.
156
+ size_t size() const noexcept;
157
+
158
+ OptimizerOptions& defaults() noexcept;
159
+
160
+ const OptimizerOptions& defaults() const noexcept;
161
+
162
+ /// Provides a reference to the param_groups this optimizer holds.
163
+ std::vector<OptimizerParamGroup>& param_groups() noexcept;
164
+
165
+ /// Provides a const reference to the param_groups this optimizer holds.
166
+ const std::vector<OptimizerParamGroup>& param_groups() const noexcept;
167
+
168
+ /// Provides a reference to the state this optimizer holds
169
+ ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>&
170
+ state() noexcept;
171
+
172
+ /// Provides a const reference to the state this optimizer holds
173
+ const ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>& state()
174
+ const noexcept;
175
+
176
+ /// Serializes the optimizer state into the given `archive`.
177
+ virtual void save(serialize::OutputArchive& archive) const;
178
+
179
+ /// Deserializes the optimizer state from the given `archive`.
180
+ virtual void load(serialize::InputArchive& archive);
181
+
182
+ protected:
183
+ std::vector<OptimizerParamGroup> param_groups_;
184
+ ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>> state_;
185
+ std::unique_ptr<OptimizerOptions> defaults_;
186
+ };
187
+
188
+ /* How do we decide whether to serialize undefined tensors or
189
+ c10::nullopt values into the output archive?
190
+ Answer: we strictly follow the behavior of Python API. To be more specific:
191
+
192
+ For optimizer options:
193
+ a) For undefined tensor: currently no tensor is used as an options argument in
194
+ Python API, so we don't need to worry about it now. b) For c10::nullopt value:
195
+ we serialize c10::nullopt values into the output archive, to follow the exact
196
+ same behavior as Python API.
197
+
198
+ For optimizer param state:
199
+ a) For undefined tensor: in param state, undefined tensor in C++ impl is
200
+ equivalent to missing key in Python impl. Since we don't serialize missing keys
201
+ in Python API, we skip undefined tensors when serializing the param state. b)
202
+ For c10::nullopt value: in param state, c10::nullopt value in C++ impl is
203
+ equivalent to missing key in Python impl. Since we don't serialize missing keys
204
+ in Python API, we skip c10::nullopt values when serializing the param state. */
205
+
206
+ /// Serializes an `Optimizer` into an `OutputArchive`.
207
+ TORCH_API serialize::OutputArchive& operator<<(
208
+ serialize::OutputArchive& archive,
209
+ const Optimizer& optimizer);
210
+
211
+ /// Deserializes a `Tensor` from an `InputArchive`.
212
+ TORCH_API serialize::InputArchive& operator>>(
213
+ serialize::InputArchive& archive,
214
+ Optimizer& optimizer);
215
+
216
+ } // namespace optim
217
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/rmsprop.h ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/nn/module.h>
4
+ #include <torch/optim/optimizer.h>
5
+ #include <torch/optim/serialize.h>
6
+ #include <torch/serialize/archive.h>
7
+ #include <torch/types.h>
8
+
9
+ #include <functional>
10
+ #include <memory>
11
+ #include <string>
12
+ #include <vector>
13
+
14
+ namespace torch {
15
+ namespace serialize {
16
+ class OutputArchive;
17
+ class InputArchive;
18
+ } // namespace serialize
19
+ } // namespace torch
20
+
21
+ namespace torch {
22
+ namespace optim {
23
+
24
+ struct TORCH_API RMSpropOptions
25
+ : public OptimizerCloneableOptions<RMSpropOptions> {
26
+ RMSpropOptions(double lr = 1e-2);
27
+ TORCH_ARG(double, lr) = 1e-2;
28
+ TORCH_ARG(double, alpha) = 0.99;
29
+ TORCH_ARG(double, eps) = 1e-8;
30
+ TORCH_ARG(double, weight_decay) = 0;
31
+ TORCH_ARG(double, momentum) = 0;
32
+ TORCH_ARG(bool, centered) = false;
33
+
34
+ public:
35
+ void serialize(torch::serialize::InputArchive& archive) override;
36
+ void serialize(torch::serialize::OutputArchive& archive) const override;
37
+ TORCH_API friend bool operator==(
38
+ const RMSpropOptions& lhs,
39
+ const RMSpropOptions& rhs);
40
+ double get_lr() const override;
41
+ void set_lr(const double lr) override;
42
+ };
43
+
44
+ struct TORCH_API RMSpropParamState
45
+ : public OptimizerCloneableParamState<RMSpropParamState> {
46
+ TORCH_ARG(int64_t, step) = 0;
47
+ TORCH_ARG(torch::Tensor, square_avg);
48
+ TORCH_ARG(torch::Tensor, momentum_buffer) = {};
49
+ TORCH_ARG(torch::Tensor, grad_avg) = {};
50
+
51
+ public:
52
+ void serialize(torch::serialize::InputArchive& archive) override;
53
+ void serialize(torch::serialize::OutputArchive& archive) const override;
54
+ TORCH_API friend bool operator==(
55
+ const RMSpropParamState& lhs,
56
+ const RMSpropParamState& rhs);
57
+ };
58
+
59
+ class TORCH_API RMSprop : public Optimizer {
60
+ public:
61
+ explicit RMSprop(
62
+ std::vector<OptimizerParamGroup> param_groups,
63
+ RMSpropOptions defaults = {})
64
+ : Optimizer(
65
+ std::move(param_groups),
66
+ std::make_unique<RMSpropOptions>(defaults)) {
67
+ TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
68
+ TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps());
69
+ TORCH_CHECK(
70
+ defaults.momentum() >= 0,
71
+ "Invalid momentum value: ",
72
+ defaults.momentum());
73
+ TORCH_CHECK(
74
+ defaults.weight_decay() >= 0,
75
+ "Invalid weight_decay value: ",
76
+ defaults.weight_decay());
77
+ TORCH_CHECK(
78
+ defaults.alpha() >= 0, "Invalid alpha value: ", defaults.alpha());
79
+ }
80
+
81
+ explicit RMSprop(std::vector<Tensor> params, RMSpropOptions defaults = {})
82
+ : RMSprop({OptimizerParamGroup(std::move(params))}, defaults) {}
83
+
84
+ torch::Tensor step(LossClosure closure = nullptr) override;
85
+ void save(serialize::OutputArchive& archive) const override;
86
+ void load(serialize::InputArchive& archive) override;
87
+
88
+ private:
89
+ template <typename Self, typename Archive>
90
+ static void serialize(Self& self, Archive& archive) {
91
+ _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(RMSprop);
92
+ }
93
+ };
94
+ } // namespace optim
95
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/serialize.h ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/optim/optimizer.h>
5
+ #include <torch/serialize/archive.h>
6
+ #include <torch/types.h>
7
+ #include <cstddef>
8
+ #include <cstdint>
9
+ #include <deque>
10
+ #include <string>
11
+ #include <vector>
12
+
13
+ namespace torch {
14
+ namespace optim {
15
+ namespace detail {
16
+ // Utility function to save state
17
+ template <typename DerivedOptimizerParamState>
18
+ void serialize(
19
+ serialize::OutputArchive& archive,
20
+ const ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>&
21
+ state) {
22
+ for (const auto& item : state) {
23
+ serialize::OutputArchive param_state_archive(archive.compilation_unit());
24
+ std::string tensorimpl_key =
25
+ std::to_string(reinterpret_cast<size_t>(item.first));
26
+ const DerivedOptimizerParamState& curr_state =
27
+ static_cast<const DerivedOptimizerParamState&>(*(item.second.get()));
28
+ curr_state.serialize(param_state_archive);
29
+ archive.write(tensorimpl_key, param_state_archive);
30
+ }
31
+ }
32
+
33
+ // Utility function to load state
34
+ template <typename DerivedOptimizerParamState>
35
+ void serialize(
36
+ serialize::InputArchive& archive,
37
+ ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>& state) {
38
+ std::vector<std::string> tensorimpl_keys = archive.keys();
39
+ for (const std::string& tensorimpl_key : tensorimpl_keys) {
40
+ serialize::InputArchive param_state_archive;
41
+ archive.read(tensorimpl_key, param_state_archive);
42
+ DerivedOptimizerParamState param_state;
43
+ param_state.serialize(param_state_archive);
44
+ state[reinterpret_cast<void*>(std::stoull(tensorimpl_key))] =
45
+ std::make_unique<DerivedOptimizerParamState>(param_state);
46
+ }
47
+ }
48
+
49
+ // Utility function to save param_groups
50
+ template <typename DerivedOptimizerParamOptions>
51
+ void serialize(
52
+ serialize::OutputArchive& archive,
53
+ const std::vector<OptimizerParamGroup>& param_groups) {
54
+ archive.write(
55
+ "param_groups/size",
56
+ torch::tensor(static_cast<int64_t>(param_groups.size())));
57
+ for (const auto i : c10::irange(param_groups.size())) {
58
+ serialize::OutputArchive param_group_archive(archive.compilation_unit());
59
+ std::vector<Tensor> params = param_groups[i].params();
60
+ param_group_archive.write(
61
+ "params/size", torch::tensor(static_cast<int64_t>(params.size())));
62
+ for (const auto index : c10::irange(params.size())) {
63
+ param_group_archive.write(
64
+ "params/" + std::to_string(index),
65
+ IValue(std::to_string(
66
+ reinterpret_cast<size_t>(params[index].unsafeGetTensorImpl()))));
67
+ }
68
+ const DerivedOptimizerParamOptions& param_group_options =
69
+ static_cast<const DerivedOptimizerParamOptions&>(
70
+ param_groups[i].options());
71
+ serialize::OutputArchive param_group_options_archive(
72
+ param_group_archive.compilation_unit());
73
+ param_group_options.serialize(param_group_options_archive);
74
+ param_group_archive.write("options", param_group_options_archive);
75
+ archive.write("param_groups/" + std::to_string(i), param_group_archive);
76
+ }
77
+ }
78
+
79
+ // Utility function to load param_groups
80
+ // We take as input vector of pair of string and unique_ptr to optimizer options
81
+ // so that we can retain the state for each param by using the old tensor impl
82
+ // keys (saved during serialization) and map the new tensor impl keys to the
83
+ // correct state for each param
84
+ template <typename DerivedOptimizerParamOptions>
85
+ void serialize(
86
+ serialize::InputArchive& archive,
87
+ std::vector<
88
+ std::pair<std::vector<std::string>, std::unique_ptr<OptimizerOptions>>>&
89
+ param_groups) {
90
+ torch::Tensor param_groups_size_tensor;
91
+ archive.read("param_groups/size", param_groups_size_tensor);
92
+ const int64_t param_groups_size = param_groups_size_tensor.item<int64_t>();
93
+ for (const auto i : c10::irange(param_groups_size)) {
94
+ serialize::InputArchive param_group_archive;
95
+ archive.read("param_groups/" + std::to_string(i), param_group_archive);
96
+ torch::Tensor size_tensor;
97
+ param_group_archive.read("params/size", size_tensor);
98
+ const int64_t size = size_tensor.item<int64_t>();
99
+ std::vector<std::string> params;
100
+ for (const auto index : c10::irange(size)) {
101
+ IValue ivalue;
102
+ param_group_archive.read("params/" + std::to_string(index), ivalue);
103
+ std::string element = ivalue.toStringRef();
104
+ params.emplace_back(element);
105
+ }
106
+ serialize::InputArchive param_group_options_archive;
107
+ param_group_archive.read("options", param_group_options_archive);
108
+ DerivedOptimizerParamOptions param_group_options(0);
109
+ param_group_options.serialize(param_group_options_archive);
110
+ param_groups.emplace_back(std::make_pair(
111
+ params,
112
+ std::make_unique<DerivedOptimizerParamOptions>(param_group_options)));
113
+ }
114
+ }
115
+ } // namespace detail
116
+
117
+ // Note: These functions are all called `serialize()` so they can be called
118
+ // inside a template where the archive type is a template type and can thus be
119
+ // passed such that the appropriate overload is selected.
120
+
121
+ /// Utility function to save a value of `int64_t` type.
122
+ void serialize(
123
+ serialize::OutputArchive& archive,
124
+ const std::string& key,
125
+ const int64_t& value);
126
+
127
+ /// Utility function to load a value of `int64_t` type.
128
+ void serialize(
129
+ serialize::InputArchive& archive,
130
+ const std::string& key,
131
+ int64_t& value);
132
+
133
+ /// Utility function to save a vector of step buffers.
134
+ void serialize(
135
+ serialize::OutputArchive& archive,
136
+ const std::string& key,
137
+ const std::vector<int64_t>& steps);
138
+
139
+ /// Utility function to load a vector of step buffers.
140
+ void serialize(
141
+ serialize::InputArchive& archive,
142
+ const std::string& key,
143
+ std::vector<int64_t>& steps);
144
+
145
+ // Utility function to save state and param_groups
146
+ template <
147
+ typename DerivedOptimizerParamState,
148
+ typename DerivedOptimizerParamOptions>
149
+ void serialize(serialize::OutputArchive& archive, const Optimizer& optimizer) {
150
+ archive.write("pytorch_version", IValue("1.5.0"));
151
+ serialize::OutputArchive state_archive(archive.compilation_unit());
152
+ detail::serialize<DerivedOptimizerParamState>(
153
+ state_archive, optimizer.state());
154
+ archive.write("state", state_archive);
155
+
156
+ serialize::OutputArchive param_groups_archive(archive.compilation_unit());
157
+ detail::serialize<DerivedOptimizerParamOptions>(
158
+ param_groups_archive, optimizer.param_groups());
159
+ archive.write("param_groups", param_groups_archive);
160
+ }
161
+
162
+ // Utility function to load state and param_groups and update state
163
+ template <
164
+ typename DerivedOptimizerParamState,
165
+ typename DerivedOptimizerParamOptions>
166
+ void serialize(serialize::InputArchive& archive, Optimizer& optimizer) {
167
+ IValue pytorch_version;
168
+ archive.read("pytorch_version", pytorch_version);
169
+ TORCH_INTERNAL_ASSERT(pytorch_version.toStringRef() == "1.5.0");
170
+ serialize::InputArchive state_archive;
171
+ archive.read("state", state_archive);
172
+ ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>> saved_state;
173
+ detail::serialize<DerivedOptimizerParamState>(state_archive, saved_state);
174
+
175
+ serialize::InputArchive param_groups_archive;
176
+ archive.read("param_groups", param_groups_archive);
177
+ std::vector<
178
+ std::pair<std::vector<std::string>, std::unique_ptr<OptimizerOptions>>>
179
+ saved_param_groups;
180
+ detail::serialize<DerivedOptimizerParamOptions>(
181
+ param_groups_archive, saved_param_groups);
182
+
183
+ // update state
184
+ TORCH_CHECK(
185
+ saved_param_groups.size() == optimizer.param_groups().size(),
186
+ "loaded state dict has a different number of parameter groups");
187
+ for (const auto i : c10::irange(saved_param_groups.size())) {
188
+ std::vector<std::string> param_group_old_keys = saved_param_groups[i].first;
189
+ std::vector<Tensor> params = optimizer.param_groups()[i].params();
190
+ TORCH_CHECK(
191
+ param_group_old_keys.size() == params.size(),
192
+ "loaded state dict contains a parameter group that has a different size than the optimizer's parameter group");
193
+
194
+ for (const auto idx : c10::irange(params.size())) {
195
+ auto param_group_old_key =
196
+ reinterpret_cast<void*>(std::stoull(param_group_old_keys[idx]));
197
+ if (saved_state.find(param_group_old_key) != saved_state.end()) {
198
+ optimizer.state()[params[idx].unsafeGetTensorImpl()] =
199
+ std::move(saved_state[param_group_old_key]);
200
+ }
201
+ }
202
+ }
203
+ }
204
+
205
+ /// Utility function to save a vector of buffers.
206
+ template <typename BufferContainer>
207
+ void serialize(
208
+ serialize::OutputArchive& archive,
209
+ const std::string& key,
210
+ const BufferContainer& buffers) {
211
+ archive.write(
212
+ key + "/size", torch::tensor(static_cast<int64_t>(buffers.size())));
213
+ for (const auto index : c10::irange(buffers.size())) {
214
+ archive.write(
215
+ key + "/" + std::to_string(index), buffers[index], /*is_buffer=*/true);
216
+ }
217
+ }
218
+
219
+ /// Utility function to load a vector of buffers.
220
+ template <typename BufferContainer>
221
+ void serialize(
222
+ serialize::InputArchive& archive,
223
+ const std::string& key,
224
+ BufferContainer& buffers) {
225
+ buffers.clear();
226
+ torch::Tensor size_tensor;
227
+ archive.read(key + "/size", size_tensor);
228
+ const size_t size = size_tensor.item<int64_t>();
229
+ for (const auto index : c10::irange(size)) {
230
+ buffers.emplace_back();
231
+ archive.read(
232
+ key + "/" + std::to_string(index), buffers.back(), /*is_buffer=*/true);
233
+ }
234
+ }
235
+
236
+ template <typename T>
237
+ c10::List<T> deque_to_list(const std::deque<T>& dq) {
238
+ c10::List<T> list;
239
+ list.reserve(dq.size());
240
+ for (const auto& e : dq) {
241
+ list.emplace_back(e);
242
+ }
243
+ return list;
244
+ }
245
+
246
+ template <typename T>
247
+ std::deque<T> list_to_deque(const c10::List<T>& list) {
248
+ std::deque<T> dq;
249
+ for (const auto& e : list) {
250
+ dq.emplace_back(e);
251
+ }
252
+ return dq;
253
+ }
254
+
255
+ #define _TORCH_OPTIM_SERIALIZE(name) \
256
+ torch::optim::serialize(archive, #name, self.name)
257
+
258
+ #define _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(OptimizerName) \
259
+ torch::optim::serialize<OptimizerName##ParamState, OptimizerName##Options>( \
260
+ archive, self)
261
+
262
+ #define _TORCH_OPTIM_SERIALIZE_TORCH_ARG(name) \
263
+ { \
264
+ auto ivalue = torch::IValue(name()); \
265
+ /* do not serialize if name is an undefined tensor*/ \
266
+ if (!(ivalue.isTensor() && \
267
+ ivalue.unsafeToTensorImpl() == \
268
+ at::UndefinedTensorImpl::singleton())) { \
269
+ archive.write(#name, ivalue); \
270
+ } \
271
+ }
272
+
273
+ #define _TORCH_OPTIM_SERIALIZE_TORCH_ARG_DEQUE(name) \
274
+ { \
275
+ c10::IValue ivalue = torch::IValue(deque_to_list(name())); \
276
+ archive.write(#name, ivalue); \
277
+ }
278
+
279
+ #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG(T, name) \
280
+ { \
281
+ c10::IValue ivalue; \
282
+ bool exists = archive.try_read(#name, ivalue); \
283
+ if (exists) { \
284
+ name(ivalue.to<T>()); \
285
+ } else { \
286
+ bool is_tensor_type = std::is_base_of<torch::Tensor, T>::value; \
287
+ TORCH_INTERNAL_ASSERT(is_tensor_type); \
288
+ } \
289
+ }
290
+
291
+ #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG_OPTIONAL(T, name) \
292
+ { \
293
+ c10::IValue ivalue; \
294
+ bool exists = archive.try_read(#name, ivalue); \
295
+ if (exists) { \
296
+ name(ivalue.toOptional<T>()); \
297
+ } \
298
+ }
299
+
300
+ #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG_DEQUE(T, name) \
301
+ { \
302
+ c10::IValue ivalue; \
303
+ archive.read(#name, ivalue); \
304
+ auto list = ivalue.to<c10::List<T::value_type>>(); \
305
+ name(list_to_deque(list)); \
306
+ }
307
+
308
+ } // namespace optim
309
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/ordered_dict.h ADDED
@@ -0,0 +1,516 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <initializer_list>
5
+ #include <string>
6
+ #include <unordered_map>
7
+ #include <utility>
8
+ #include <vector>
9
+
10
+ namespace torch {
11
+ /// An ordered dictionary implementation, akin to Python's `OrderedDict`.
12
+ template <typename Key, typename Value>
13
+ class OrderedDict {
14
+ public:
15
+ /// A (key, value) pair.
16
+ class Item;
17
+
18
+ // The lifetime of an iterator is bound to the lifetime of the `OrderedDict`.
19
+ // Further, any `insert()` operation may invalidate all iterators
20
+ // pointing into the vector.
21
+ using Iterator = typename std::vector<Item>::iterator;
22
+ using ConstIterator = typename std::vector<Item>::const_iterator;
23
+
24
+ /// Constructs the `OrderedDict` with a short description of the kinds of keys
25
+ /// stored in the `OrderedDict`. This description is used in error messages
26
+ /// thrown by the `OrderedDict`.
27
+ explicit OrderedDict(std::string key_description = "Key");
28
+
29
+ /// Copy constructs this `OrderedDict` from `other`.
30
+ OrderedDict(const OrderedDict& other);
31
+
32
+ /// Assigns items from `other` to this `OrderedDict`.
33
+ OrderedDict& operator=(const OrderedDict& other);
34
+
35
+ // NB: Move works by default, because you can move-construct vectors of const
36
+ // values. I tried to make this noexcept (conditional on the move constructors
37
+ // of index_ and items_ being noexcept) but the obvious spelling didn't
38
+ // compile on Windows.
39
+ OrderedDict(OrderedDict&& other) noexcept = default;
40
+ OrderedDict& operator=(OrderedDict&& other) noexcept = default;
41
+
42
+ ~OrderedDict() = default;
43
+
44
+ /// Constructs a new `OrderedDict` and pre-populates it with the given
45
+ /// `Item`s.
46
+ /*implicit */ OrderedDict(std::initializer_list<Item> initializer_list);
47
+
48
+ /// Returns the key description string the `OrderedDict` was constructed with.
49
+ const std::string& key_description() const noexcept;
50
+
51
+ // Element Access
52
+
53
+ /// Returns the very first item in the `OrderedDict` and throws an exception
54
+ /// if it is empty.
55
+ Item& front();
56
+
57
+ /// Returns the very first item in the `OrderedDict` and throws an exception
58
+ /// if it is empty.
59
+ const Item& front() const;
60
+
61
+ /// Returns the very last item in the `OrderedDict` and throws an exception
62
+ /// if it is empty.
63
+ Item& back();
64
+
65
+ /// Returns the very last item in the `OrderedDict` and throws an exception
66
+ /// if it is empty.
67
+ const Item& back() const;
68
+
69
+ /// Returns the item at the `index`-th position in the `OrderedDict`. Throws
70
+ /// an exception if the index is out of bounds.
71
+ Item& operator[](size_t index);
72
+
73
+ /// Returns the item at the `index`-th position in the `OrderedDict`. Throws
74
+ /// an exception if the index is out of bounds.
75
+ const Item& operator[](size_t index) const;
76
+
77
+ /// Returns the value associated with the given `key`. Throws an exception if
78
+ /// no such key is stored in the `OrderedDict`. Use `find()` for a
79
+ /// non-throwing way of accessing a value if it is present.
80
+ Value& operator[](const Key& key);
81
+
82
+ /// Returns the value associated with the given `key`. Throws an exception if
83
+ /// no such key is stored in the `OrderedDict`. Use `find()` for a
84
+ /// non-throwing way of accessing a value if it is present.
85
+ const Value& operator[](const Key& key) const;
86
+
87
+ // Lookup
88
+
89
+ /// Returns a pointer to the value associated with the given key, or a
90
+ /// `nullptr` if no such key is stored in the `OrderedDict`.
91
+ Value* find(const Key& key) noexcept;
92
+
93
+ /// Returns a pointer to the value associated with the given key, or a
94
+ /// `nullptr` if no such key is stored in the `OrderedDict`.
95
+ const Value* find(const Key& key) const noexcept;
96
+
97
+ /// Returns true if the key is present in the `OrderedDict`.
98
+ bool contains(const Key& key) const noexcept;
99
+
100
+ // Iterators
101
+
102
+ /// Returns an iterator to the first item in the `OrderedDict`. Iteration is
103
+ /// ordered.
104
+ Iterator begin();
105
+
106
+ /// Returns an iterator to the first item in the `OrderedDict`. Iteration is
107
+ /// ordered.
108
+ ConstIterator begin() const;
109
+
110
+ /// Returns an iterator one past the last item in the `OrderedDict`.
111
+ Iterator end();
112
+
113
+ /// Returns an iterator one past the last item in the `OrderedDict`.
114
+ ConstIterator end() const;
115
+
116
+ // Capacity
117
+
118
+ /// Returns the number of items currently stored in the `OrderedDict`.
119
+ size_t size() const noexcept;
120
+
121
+ /// Returns true if the `OrderedDict` contains no elements.
122
+ bool is_empty() const noexcept;
123
+
124
+ /// Resizes internal storage to fit at least `requested_capacity` items
125
+ /// without requiring reallocation.
126
+ void reserve(size_t requested_capacity);
127
+
128
+ // Modifiers
129
+
130
+ /// Inserts a new `(key, value)` pair into the `OrderedDict`. Throws an
131
+ /// exception if the key is already present. If insertion is successful,
132
+ /// immediately returns a reference to the inserted value.
133
+ template <typename K, typename V>
134
+ Value& insert(K&& key, V&& value);
135
+
136
+ /// Inserts a new `(key, value)` pair into the `OrderedDict`. Throws an
137
+ /// exception if the key is already present. If insertion is successful,
138
+ /// immediately returns a reference to the inserted value.
139
+ Value& insert(Key key, Value&& value);
140
+
141
+ /// Inserts all items from `other` into this `OrderedDict`. If any key from
142
+ /// `other` is already present in this `OrderedDict`, an exception is thrown.
143
+ void update(OrderedDict&& other);
144
+
145
+ /// Inserts all items from `other` into this `OrderedDict`. If any key from
146
+ /// `other` is already present in this `OrderedDict`, an exception is thrown.
147
+ void update(const OrderedDict& other);
148
+
149
+ /// Removes the item that has `key` from this `OrderedDict` if exists and if
150
+ /// it doesn't an exception is thrown.
151
+ void erase(const Key& key);
152
+
153
+ /// Removes all items from this `OrderedDict`.
154
+ void clear();
155
+
156
+ // Observers
157
+
158
+ /// Returns the items stored in the `OrderedDict`.
159
+ const std::vector<Item>& items() const noexcept;
160
+
161
+ /// Returns a newly allocated vector and copies all keys from this
162
+ /// `OrderedDict` into the vector.
163
+ ::std::vector<Key> keys() const;
164
+
165
+ /// Returns a newly allocated vector and copies all values from this
166
+ /// `OrderedDict` into the vector.
167
+ ::std::vector<Value> values() const;
168
+
169
+ /// Returns a newly allocated vector and copies all keys and values from this
170
+ /// `OrderedDict` into a vector of `std::pair<Key, Value>`.
171
+ ::std::vector<std::pair<Key, Value>> pairs() const;
172
+
173
+ /// Returns true if both dicts contain the same keys and values, in the same
174
+ /// order.
175
+ template <typename K, typename V>
176
+ friend bool operator==(
177
+ const OrderedDict<K, V>& a,
178
+ const OrderedDict<K, V>& b);
179
+
180
+ private:
181
+ /// A mapping from a key to an index into the `items_` vector.
182
+ ::std::unordered_map<Key, size_t> index_;
183
+
184
+ /// The items stored in the `OrderedDict`.
185
+ ::std::vector<Item> items_;
186
+
187
+ /// A description of the keys stored in the `OrderedDict`.
188
+ ::std::string key_description_{"Key"};
189
+ };
190
+
191
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict::Item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
192
+
193
+ template <typename Key, typename Value>
194
+ class OrderedDict<Key, Value>::Item {
195
+ public:
196
+ /// Constructs a new item.
197
+ Item(Key key, Value value) : pair_(std::move(key), std::move(value)) {}
198
+
199
+ /// Returns a reference to the value.
200
+ Value& operator*() {
201
+ return value();
202
+ }
203
+
204
+ /// Returns a reference to the value.
205
+ const Value& operator*() const {
206
+ return value();
207
+ }
208
+
209
+ /// Allows access to the value using the arrow operator.
210
+ Value* operator->() {
211
+ return &value();
212
+ }
213
+
214
+ /// Allows access to the value using the arrow operator.
215
+ const Value* operator->() const {
216
+ return &value();
217
+ }
218
+
219
+ /// Returns a reference to the key.
220
+ const Key& key() const noexcept {
221
+ return pair_.first;
222
+ }
223
+
224
+ /// Returns a reference to the value.
225
+ Value& value() noexcept {
226
+ return pair_.second;
227
+ }
228
+
229
+ /// Returns a reference to the value.
230
+ const Value& value() const noexcept {
231
+ return pair_.second;
232
+ }
233
+
234
+ /// Returns a `(key, value)` pair.
235
+ const std::pair<Key, Value>& pair() const noexcept {
236
+ return pair_;
237
+ }
238
+
239
+ private:
240
+ /// This is stored as an std::pair because it will make Python binding a lot,
241
+ /// lot easier.
242
+ ::std::pair<Key, Value> pair_;
243
+ };
244
+
245
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
246
+
247
+ template <typename Key, typename Value>
248
+ OrderedDict<Key, Value>::OrderedDict(std::string key_description)
249
+ : key_description_(std::move(key_description)) {}
250
+
251
+ template <typename Key, typename Value>
252
+ OrderedDict<Key, Value>::OrderedDict(const OrderedDict& other)
253
+ : index_(other.index_), key_description_(other.key_description_) {
254
+ // Copy we have to do ourselves, because items' keys are const, so we have to
255
+ // re-insert the items.
256
+ for (const auto& item : other.items_) {
257
+ items_.push_back(item);
258
+ }
259
+ }
260
+
261
+ template <typename Key, typename Value>
262
+ OrderedDict<Key, Value>& OrderedDict<Key, Value>::operator=(
263
+ const OrderedDict& other) {
264
+ index_ = other.index_;
265
+ items_.clear();
266
+ for (auto& item : other.items_) {
267
+ items_.push_back(item);
268
+ }
269
+ key_description_ = other.key_description_;
270
+ return *this;
271
+ }
272
+
273
+ template <typename Key, typename Value>
274
+ OrderedDict<Key, Value>::OrderedDict(
275
+ std::initializer_list<Item> initializer_list)
276
+ : OrderedDict("Key") {
277
+ items_.reserve(initializer_list.size());
278
+ for (auto& item : initializer_list) {
279
+ // Copy the key here and move it into the index.
280
+ items_.emplace_back(item.key(), std::move(item.value()));
281
+ index_.emplace(std::move(item.key()), size() - 1);
282
+ }
283
+ }
284
+
285
+ template <typename Key, typename Value>
286
+ typename OrderedDict<Key, Value>::Iterator OrderedDict<Key, Value>::begin() {
287
+ return items_.begin();
288
+ }
289
+
290
+ template <typename Key, typename Value>
291
+ typename OrderedDict<Key, Value>::ConstIterator OrderedDict<Key, Value>::begin()
292
+ const {
293
+ return items_.begin();
294
+ }
295
+
296
+ template <typename Key, typename Value>
297
+ typename OrderedDict<Key, Value>::Iterator OrderedDict<Key, Value>::end() {
298
+ return items_.end();
299
+ }
300
+
301
+ template <typename Key, typename Value>
302
+ typename OrderedDict<Key, Value>::ConstIterator OrderedDict<Key, Value>::end()
303
+ const {
304
+ return items_.end();
305
+ }
306
+
307
+ template <typename Key, typename Value>
308
+ typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::front() {
309
+ TORCH_CHECK(!items_.empty(), "Called front() on an empty OrderedDict");
310
+ return items_.front();
311
+ }
312
+
313
+ template <typename Key, typename Value>
314
+ const typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::front()
315
+ const {
316
+ TORCH_CHECK(!items_.empty(), "Called front() on an empty OrderedDict");
317
+ return items_.front();
318
+ }
319
+
320
+ template <typename Key, typename Value>
321
+ typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::back() {
322
+ TORCH_CHECK(!items_.empty(), "Called back() on an empty OrderedDict");
323
+ return items_.back();
324
+ }
325
+
326
+ template <typename Key, typename Value>
327
+ const typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::back()
328
+ const {
329
+ TORCH_CHECK(!items_.empty(), "Called back() on an empty OrderedDict");
330
+ return items_.back();
331
+ }
332
+
333
+ template <typename Key, typename Value>
334
+ typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::operator[](
335
+ size_t index) {
336
+ TORCH_CHECK(index < items_.size(), "Index ", index, " is out of bounds");
337
+ return items_[index];
338
+ }
339
+
340
+ template <typename Key, typename Value>
341
+ const typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::
342
+ operator[](size_t index) const {
343
+ TORCH_CHECK(index < items_.size(), "Index ", index, " is out of bounds");
344
+ return items_[index];
345
+ }
346
+
347
+ template <typename Key, typename Value>
348
+ Value& OrderedDict<Key, Value>::operator[](const Key& key) {
349
+ if (auto* value = find(key)) {
350
+ return *value;
351
+ }
352
+ AT_ERROR(key_description_, " '", key, "' is not defined");
353
+ }
354
+
355
+ template <typename Key, typename Value>
356
+ const Value& OrderedDict<Key, Value>::operator[](const Key& key) const {
357
+ if (auto* value = find(key)) {
358
+ return *value;
359
+ }
360
+ AT_ERROR(key_description_, " '", key, "' is not defined");
361
+ }
362
+
363
+ template <typename Key, typename Value>
364
+ template <typename K, typename V>
365
+ Value& OrderedDict<Key, Value>::insert(K&& key, V&& value) {
366
+ TORCH_CHECK(
367
+ index_.count(key) == 0, key_description_, " '", key, "' already defined");
368
+ // Copy `key` here and move it into the index.
369
+ items_.emplace_back(key, std::forward<V>(value));
370
+ index_.emplace(std::forward<K>(key), size() - 1);
371
+ return items_.back().value();
372
+ }
373
+
374
+ template <typename Key, typename Value>
375
+ Value& OrderedDict<Key, Value>::insert(Key key, Value&& value) {
376
+ return insert<Key, Value>(std::move(key), std::move(value));
377
+ }
378
+
379
+ template <typename Key, typename Value>
380
+ void OrderedDict<Key, Value>::update(OrderedDict&& other) {
381
+ reserve(size() + other.size());
382
+ for (auto& item : other) {
383
+ // We want to call `insert()` to prevent duplicate keys.
384
+ insert(std::move(item.key()), std::move(item.value()));
385
+ }
386
+ }
387
+
388
+ template <typename Key, typename Value>
389
+ void OrderedDict<Key, Value>::update(const OrderedDict& other) {
390
+ reserve(size() + other.size());
391
+ for (auto& item : other) {
392
+ // We want to call `insert()` to prevent duplicate keys.
393
+ insert(item.key(), item.value());
394
+ }
395
+ }
396
+
397
+ template <typename Key, typename Value>
398
+ Value* OrderedDict<Key, Value>::find(const Key& key) noexcept {
399
+ auto iterator = index_.find(key);
400
+ if (iterator == index_.end()) {
401
+ return nullptr;
402
+ }
403
+ return &items_[iterator->second].value();
404
+ }
405
+
406
+ template <typename Key, typename Value>
407
+ const Value* OrderedDict<Key, Value>::find(const Key& key) const noexcept {
408
+ auto iterator = index_.find(key);
409
+ if (iterator == index_.end()) {
410
+ return nullptr;
411
+ }
412
+ return &items_[iterator->second].value();
413
+ }
414
+
415
+ template <typename Key, typename Value>
416
+ void OrderedDict<Key, Value>::erase(const Key& key) {
417
+ auto it = index_.find(key);
418
+ TORCH_CHECK(it != index_.end(), "Key '", key, "' doesn't exist");
419
+
420
+ auto index = it->second;
421
+ index_.erase(it);
422
+ items_.erase(items_.begin() + index);
423
+
424
+ for (auto& pair : index_)
425
+ if (pair.second > index)
426
+ --pair.second;
427
+ }
428
+
429
+ template <typename Key, typename Value>
430
+ bool OrderedDict<Key, Value>::contains(const Key& key) const noexcept {
431
+ return find(key) != nullptr;
432
+ }
433
+
434
+ template <typename Key, typename Value>
435
+ void OrderedDict<Key, Value>::clear() {
436
+ index_.clear();
437
+ items_.clear();
438
+ }
439
+
440
+ template <typename Key, typename Value>
441
+ size_t OrderedDict<Key, Value>::size() const noexcept {
442
+ return items_.size();
443
+ }
444
+
445
+ template <typename Key, typename Value>
446
+ bool OrderedDict<Key, Value>::is_empty() const noexcept {
447
+ return items_.empty();
448
+ }
449
+
450
+ template <typename Key, typename Value>
451
+ const std::string& OrderedDict<Key, Value>::key_description() const noexcept {
452
+ return key_description_;
453
+ }
454
+
455
+ template <typename Key, typename Value>
456
+ const std::vector<typename OrderedDict<Key, Value>::Item>& OrderedDict<
457
+ Key,
458
+ Value>::items() const noexcept {
459
+ return items_;
460
+ }
461
+
462
+ template <typename Key, typename Value>
463
+ ::std::vector<Key> OrderedDict<Key, Value>::keys() const {
464
+ std::vector<Key> keys;
465
+ keys.reserve(size());
466
+ for (const auto& item : items_) {
467
+ keys.push_back(item.key());
468
+ }
469
+ return keys;
470
+ }
471
+
472
+ template <typename Key, typename Value>
473
+ ::std::vector<Value> OrderedDict<Key, Value>::values() const {
474
+ std::vector<Value> values;
475
+ values.reserve(size());
476
+ for (const auto& item : items_) {
477
+ values.push_back(item.value());
478
+ }
479
+ return values;
480
+ }
481
+
482
+ template <typename Key, typename Value>
483
+ ::std::vector<std::pair<Key, Value>> OrderedDict<Key, Value>::pairs() const {
484
+ std::vector<std::pair<Key, Value>> values;
485
+ values.reserve(size());
486
+ for (const auto& item : items_) {
487
+ values.push_back(item.pair());
488
+ }
489
+ return values;
490
+ }
491
+
492
+ template <typename Key, typename Value>
493
+ void OrderedDict<Key, Value>::reserve(size_t requested_capacity) {
494
+ index_.reserve(requested_capacity);
495
+ items_.reserve(requested_capacity);
496
+ }
497
+
498
+ template <typename K, typename V>
499
+ bool operator==(
500
+ const torch::OrderedDict<K, V>& a,
501
+ const torch::OrderedDict<K, V>& b) {
502
+ using Item = typename torch::OrderedDict<K, V>::Item;
503
+ if (a.index_ != b.index_)
504
+ return false;
505
+ if (a.items_.size() != b.items_.size())
506
+ return false;
507
+ // NOTE: There's no point in comparing keys for items_, as we already know
508
+ // that index is equal.
509
+ return std::equal(
510
+ a.items_.begin(),
511
+ a.items_.end(),
512
+ b.items_.begin(),
513
+ [](const Item& a, const Item& b) { return a.value() == b.value(); });
514
+ }
515
+
516
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/special.h ADDED
@@ -0,0 +1,1405 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <torch/types.h>
5
+
6
+ namespace torch {
7
+ namespace special {
8
+
9
+ /// Computes the natural logarithm of the absolute value of the gamma function
10
+ /// See https://pytorch.org/docs/master/special.html#torch.special.gammaln.
11
+ ///
12
+ /// Example:
13
+ /// ```
14
+ /// auto t = torch::randn(128, dtype=kDouble);
15
+ /// torch::special::gammaln(t);
16
+ /// ```
17
+ inline Tensor gammaln(const Tensor& self) {
18
+ return torch::special_gammaln(self);
19
+ }
20
+
21
+ inline Tensor& gammaln_out(Tensor& result, const Tensor& self) {
22
+ return torch::special_gammaln_out(result, self);
23
+ }
24
+
25
+ /// Computes the regularized lower incomplete gamma function
26
+ /// See https://pytorch.org/docs/master/special.html#torch.special.gammainc.
27
+ ///
28
+ /// Example:
29
+ /// ```
30
+ /// auto t = torch::randn(128, dtype=kDouble);
31
+ /// auto s = torch::randn(128, dtype=kDouble);
32
+ /// torch::special::gammainc(s, t);
33
+ /// ```
34
+ inline Tensor gammainc(const Tensor& self, const Tensor& other) {
35
+ return torch::special_gammainc(self, other);
36
+ }
37
+
38
+ inline Tensor& gammainc_out(
39
+ Tensor& result,
40
+ const Tensor& self,
41
+ const Tensor& other) {
42
+ return torch::special_gammainc_out(result, self, other);
43
+ }
44
+
45
+ /// Computes the regularized upper incomplete gamma function
46
+ /// See https://pytorch.org/docs/master/special.html#torch.special.gammainc.
47
+ ///
48
+ /// Example:
49
+ /// ```
50
+ /// auto t = torch::randn(128, dtype=kDouble);
51
+ /// auto s = torch::randn(128, dtype=kDouble);
52
+ /// torch::special::gammaincc(s, t);
53
+ /// ```
54
+ inline Tensor gammaincc(const Tensor& self, const Tensor& other) {
55
+ return torch::special_gammaincc(self, other);
56
+ }
57
+
58
+ inline Tensor& gammaincc_out(
59
+ Tensor& result,
60
+ const Tensor& self,
61
+ const Tensor& other) {
62
+ return torch::special_gammaincc_out(result, self, other);
63
+ }
64
+
65
+ /// Computes the multivariate log-gamma function with dimension `p`, elementwise
66
+ /// See https://pytorch.org/docs/master/special.html#torch.special.multigammaln.
67
+ ///
68
+ /// Example:
69
+ /// ```
70
+ /// auto t = torch::randn(128, dtype=kDouble);
71
+ /// torch::special::multigammaln(t, 1);
72
+ /// ```
73
+ inline Tensor multigammaln(const Tensor& self, int64_t p) {
74
+ return torch::special_multigammaln(self, p);
75
+ }
76
+
77
+ inline Tensor& multigammaln_out(Tensor& result, const Tensor& self, int64_t p) {
78
+ return torch::special_multigammaln_out(result, self, p);
79
+ }
80
+
81
+ /// Computes the nth derivative of the digamma function on the input.
82
+ /// See https:://pytorch.org/docs/master/special.html#torch.special.polygamma.
83
+ ///
84
+ /// Example:
85
+ /// ```
86
+ /// auto t = torch::randn(128, dtype=kDouble);
87
+ /// torch::special::polygamma(2, t);
88
+ /// ```
89
+ inline Tensor polygamma(int64_t n, const Tensor& self) {
90
+ return torch::special_polygamma(n, self);
91
+ }
92
+
93
+ inline Tensor& polygamma_out(Tensor& result, int64_t n, const Tensor& self) {
94
+ return torch::special_polygamma_out(result, n, self);
95
+ }
96
+
97
+ /// Computes the logarithmic derivative of the gamma function on input
98
+ /// See https://pytorch.org/docs/master/special.html#torch.special.psi
99
+ ///
100
+ /// Example:
101
+ /// ```
102
+ /// auto t = torch::randn(128, dtype=kDouble);
103
+ /// torch::special::psi(t);
104
+ /// ```
105
+ inline Tensor psi(const Tensor& self) {
106
+ return torch::special_psi(self);
107
+ }
108
+
109
+ inline Tensor& psi_out(Tensor& result, const Tensor& self) {
110
+ return torch::special_psi_out(result, self);
111
+ }
112
+
113
+ /// Computes the logarithmic derivative of the gamma function on input
114
+ /// See https://pytorch.org/docs/master/special.html#torch.special.digamma
115
+ ///
116
+ /// Example:
117
+ /// ```
118
+ /// auto t = torch::randn(128, dtype=kDouble);
119
+ /// torch::special::digamma(t);
120
+ /// ```
121
+ inline Tensor digamma(const Tensor& self) {
122
+ return torch::special_digamma(self);
123
+ }
124
+
125
+ inline Tensor& digamma_out(Tensor& result, const Tensor& self) {
126
+ return torch::special_digamma_out(result, self);
127
+ }
128
+
129
+ /// Computes entropy of input, elementwise
130
+ /// See https://pytorch.org/docs/master/special.html#torch.special.entr.
131
+ ///
132
+ /// Example:
133
+ /// ```
134
+ /// auto t = torch::randn(128, dtype=kDouble);
135
+ /// torch::special::entr(t);
136
+ /// ```
137
+ inline Tensor entr(const Tensor& self) {
138
+ return torch::special_entr(self);
139
+ }
140
+
141
+ inline Tensor& entr_out(Tensor& result, const Tensor& self) {
142
+ return torch::special_entr_out(result, self);
143
+ }
144
+
145
+ /// Computes the error function
146
+ /// See https://pytorch.org/docs/master/special.html#torch.special.erf.
147
+ ///
148
+ /// Example:
149
+ /// ```
150
+ /// auto t = torch::randn(128, dtype=kDouble);
151
+ /// torch::special::erf(t);
152
+ /// ```
153
+ inline Tensor erf(const Tensor& self) {
154
+ return torch::special_erf(self);
155
+ }
156
+
157
+ inline Tensor& erf_out(Tensor& result, const Tensor& self) {
158
+ return torch::special_erf_out(result, self);
159
+ }
160
+
161
+ /// Computes the complementary error function
162
+ /// See https://pytorch.org/docs/master/special.html#torch.special.erfc.
163
+ ///
164
+ /// Example:
165
+ /// ```
166
+ /// auto t = torch::randn(128, dtype=kDouble);
167
+ /// torch::special::erfc(t);
168
+ /// ```
169
+ inline Tensor erfc(const Tensor& self) {
170
+ return torch::special_erfc(self);
171
+ }
172
+
173
+ inline Tensor& erfc_out(Tensor& result, const Tensor& self) {
174
+ return torch::special_erfc_out(result, self);
175
+ }
176
+
177
+ /// Computes the scaled complementary error function
178
+ /// See https://pytorch.org/docs/master/special.html#torch.special.erfcx.
179
+ ///
180
+ /// Example:
181
+ /// ```
182
+ /// auto t = torch::randn(128, dtype=kDouble);
183
+ /// torch::special::erfcx(t);
184
+ /// ```
185
+ inline Tensor erfcx(const Tensor& self) {
186
+ return torch::special_erfcx(self);
187
+ }
188
+
189
+ inline Tensor& erfcx_out(Tensor& result, const Tensor& self) {
190
+ return torch::special_erfcx_out(result, self);
191
+ }
192
+
193
+ /// Computes the inverse error function
194
+ /// See https://pytorch.org/docs/master/special.html#torch.special.erfinv.
195
+ ///
196
+ /// Example:
197
+ /// ```
198
+ /// auto t = torch::randn(128, dtype=kDouble);
199
+ /// torch::special::erfinv(t);
200
+ /// ```
201
+ inline Tensor erfinv(const Tensor& self) {
202
+ return torch::special_erfinv(self);
203
+ }
204
+
205
+ inline Tensor& erfinv_out(Tensor& result, const Tensor& self) {
206
+ return torch::special_erfinv_out(result, self);
207
+ }
208
+
209
+ /// Computes the log of summed exponentials of each row of input in the given
210
+ /// dimension dim See
211
+ /// https://pytorch.org/docs/master/special.html#torch.special.logsumexp.
212
+ ///
213
+ /// Example:
214
+ /// ```
215
+ /// auto t = torch::randn(3, 3);
216
+ /// torch::special::logsumexp(t, 1);
217
+ /// ```
218
+ inline Tensor logsumexp(const Tensor& self, IntArrayRef dims, bool keepdim) {
219
+ return torch::special_logsumexp(self, dims, keepdim);
220
+ }
221
+
222
+ inline Tensor& logsumexp_out(
223
+ Tensor& result,
224
+ const Tensor& self,
225
+ IntArrayRef dims,
226
+ bool keepdim) {
227
+ return torch::special_logsumexp_out(result, self, dims, keepdim);
228
+ }
229
+
230
+ /// Computes the argument, x, for which the area under the Gaussian probability
231
+ /// density function (integrated from minus infinity to x) is equal to input,
232
+ /// elementwise. See
233
+ /// https://pytorch.org/docs/master/special.html#torch.special.ndtri
234
+ ///
235
+ /// Example:
236
+ /// ```
237
+ /// auto t = torch::rand(128, dtype=kDouble);
238
+ /// torch::special::ndtri(t);
239
+ /// ```
240
+ inline Tensor ndtri(const Tensor& self) {
241
+ return torch::special_ndtri(self);
242
+ }
243
+
244
+ inline Tensor& ndtri_out(Tensor& result, const Tensor& self) {
245
+ return torch::special_ndtri_out(result, self);
246
+ }
247
+
248
+ /// Computes the log of area under the standard Gaussian probability density
249
+ /// function, integrated from minus infinity to :attr:`input`, elementwise See
250
+ /// https://pytorch.org/docs/master/special.html#torch.special.log_ndtr
251
+ ///
252
+ /// Example:
253
+ /// ```
254
+ /// auto t = torch::randn(128, dtype=kDouble);
255
+ /// torch::special::log_ndtr(t);
256
+ /// ```
257
+ inline Tensor log_ndtr(const Tensor& self) {
258
+ return torch::special_log_ndtr(self);
259
+ }
260
+
261
+ inline Tensor& log_ndtr_out(Tensor& result, const Tensor& self) {
262
+ return torch::special_log_ndtr_out(result, self);
263
+ }
264
+
265
+ /// Computes the logit of input, elementwise.
266
+ /// See https://pytorch.org/docs/master/special.html#torch.special.logit.
267
+ ///
268
+ /// Example:
269
+ /// ```
270
+ /// auto t = torch::randn(128, dtype=kDouble);
271
+ /// torch::special::logit(t);
272
+ /// ```
273
+ inline Tensor logit(const Tensor& self) {
274
+ return torch::special_logit(self);
275
+ }
276
+
277
+ inline Tensor& logit_out(Tensor& result, const Tensor& self) {
278
+ return torch::special_logit_out(result, self);
279
+ }
280
+
281
+ /// Computes the expit (also known as the logistic sigmoid function) of input,
282
+ /// elementwise See
283
+ /// https://pytorch.org/docs/master/special.html#torch.special.expit.
284
+ ///
285
+ /// Example:
286
+ /// ```
287
+ /// auto t = torch::randn(128, dtype=kDouble);
288
+ /// torch::special::expit(t);
289
+ /// ```
290
+ inline Tensor expit(const Tensor& self) {
291
+ return torch::special_expit(self);
292
+ }
293
+
294
+ inline Tensor& expit_out(Tensor& result, const Tensor& self) {
295
+ return torch::special_expit_out(result, self);
296
+ }
297
+
298
+ /// Computes the base two exponential function of :attr:`input`, elementwise
299
+ /// See https://pytorch.org/docs/master/special.html#torch.special.exp2.
300
+ ///
301
+ /// Example:
302
+ /// ```
303
+ /// auto t = torch::randn(128, dtype=kDouble);
304
+ /// torch::special::exp2(t);
305
+ /// ```
306
+ inline Tensor exp2(const Tensor& self) {
307
+ return torch::special_exp2(self);
308
+ }
309
+
310
+ inline Tensor& exp2_out(Tensor& result, const Tensor& self) {
311
+ return torch::special_exp2_out(result, self);
312
+ }
313
+
314
+ /// Computes the exponential of the elements minus 1, elementwise
315
+ /// See https://pytorch.org/docs/master/special.html#torch.special.expm1.
316
+ ///
317
+ /// Example:
318
+ /// ```
319
+ /// auto t = torch::randn(128, dtype=kDouble);
320
+ /// torch::special::expm1(t);
321
+ /// ```
322
+ inline Tensor expm1(const Tensor& self) {
323
+ return torch::special_expm1(self);
324
+ }
325
+
326
+ inline Tensor& expm1_out(Tensor& result, const Tensor& self) {
327
+ return torch::special_expm1_out(result, self);
328
+ }
329
+
330
+ /// Computes x * log(y) for inputs, elementwise
331
+ /// See https://pytorch.org/docs/master/special.html#torch.special.xlogy.
332
+ ///
333
+ /// Example:
334
+ /// ```
335
+ /// auto x = torch::randn(128, dtype=kDouble);
336
+ /// auto y = torch::randn(128, dtype=kDouble);
337
+ /// torch::special::xlogy(x, y);
338
+ /// ```
339
+ inline Tensor xlogy(const Tensor& self, const Tensor& other) {
340
+ return torch::special_xlogy(self, other);
341
+ }
342
+
343
+ inline Tensor xlogy(const Scalar& self, const Tensor& other) {
344
+ return torch::special_xlogy(self, other);
345
+ }
346
+
347
+ inline Tensor xlogy(const Tensor& self, const Scalar& other) {
348
+ return torch::special_xlogy(self, other);
349
+ }
350
+
351
+ inline Tensor& xlogy_out(
352
+ Tensor& result,
353
+ const Tensor& self,
354
+ const Tensor& other) {
355
+ return torch::special_xlogy_out(result, self, other);
356
+ }
357
+
358
+ inline Tensor& xlogy_out(
359
+ Tensor& result,
360
+ const Scalar& self,
361
+ const Tensor& other) {
362
+ return torch::special_xlogy_out(result, self, other);
363
+ }
364
+
365
+ inline Tensor& xlogy_out(
366
+ Tensor& result,
367
+ const Tensor& self,
368
+ const Scalar& other) {
369
+ return torch::special_xlogy_out(result, self, other);
370
+ }
371
+
372
+ /// Computes x * log1p(y) for inputs, elementwise
373
+ /// See https://pytorch.org/docs/master/special.html#torch.special.xlog1py.
374
+ ///
375
+ /// Example:
376
+ /// ```
377
+ /// auto x = torch::randn(128, dtype=kDouble);
378
+ /// auto y = torch::randn(128, dtype=kDouble);
379
+ /// torch::special::xlog1py(x, y);
380
+ /// ```
381
+ inline Tensor xlog1py(const Tensor& self, const Tensor& other) {
382
+ return torch::special_xlog1py(self, other);
383
+ }
384
+
385
+ inline Tensor xlog1py(const Scalar& self, const Tensor& other) {
386
+ return torch::special_xlog1py(self, other);
387
+ }
388
+
389
+ inline Tensor xlog1py(const Tensor& self, const Scalar& other) {
390
+ return torch::special_xlog1py(self, other);
391
+ }
392
+
393
+ inline Tensor& xlog1py_out(
394
+ Tensor& result,
395
+ const Tensor& self,
396
+ const Tensor& other) {
397
+ return torch::special_xlog1py_out(result, self, other);
398
+ }
399
+
400
+ inline Tensor& xlog1py_out(
401
+ Tensor& result,
402
+ const Scalar& self,
403
+ const Tensor& other) {
404
+ return torch::special_xlog1py_out(result, self, other);
405
+ }
406
+
407
+ inline Tensor& xlog1py_out(
408
+ Tensor& result,
409
+ const Tensor& self,
410
+ const Scalar& other) {
411
+ return torch::special_xlog1py_out(result, self, other);
412
+ }
413
+
414
+ /// Computes Hurwitz Zeta function for inputs, elementwise
415
+ /// See https://pytorch.org/docs/master/special.html#torch.special.zeta.
416
+ ///
417
+ /// Example:
418
+ /// ```
419
+ /// auto x = torch::randn(128, dtype=kDouble);
420
+ /// auto y = torch::randn(128, dtype=kDouble);
421
+ /// torch::special::zeta(x, y);
422
+ /// ```
423
+ inline Tensor zeta(const Tensor& self, const Tensor& other) {
424
+ return torch::special_zeta(self, other);
425
+ }
426
+
427
+ inline Tensor zeta(const Scalar& self, const Tensor& other) {
428
+ return torch::special_zeta(self, other);
429
+ }
430
+
431
+ inline Tensor zeta(const Tensor& self, const Scalar& other) {
432
+ return torch::special_zeta(self, other);
433
+ }
434
+
435
+ inline Tensor& zeta_out(
436
+ Tensor& result,
437
+ const Tensor& self,
438
+ const Tensor& other) {
439
+ return torch::special_zeta_out(result, self, other);
440
+ }
441
+
442
+ inline Tensor& zeta_out(
443
+ Tensor& result,
444
+ const Scalar& self,
445
+ const Tensor& other) {
446
+ return torch::special_zeta_out(result, self, other);
447
+ }
448
+
449
+ inline Tensor& zeta_out(
450
+ Tensor& result,
451
+ const Tensor& self,
452
+ const Scalar& other) {
453
+ return torch::special_zeta_out(result, self, other);
454
+ }
455
+
456
+ /// Computes the zeroth order modified Bessel function of the first kind of
457
+ /// input, elementwise See
458
+ /// https://pytorch.org/docs/master/special.html#torch.special.i0
459
+ ///
460
+ /// Example:
461
+ /// ```
462
+ /// auto t = torch::randn(128, dtype=kDouble);
463
+ /// torch::special::i0(t);
464
+ /// ```
465
+ inline Tensor i0(const Tensor& self) {
466
+ return torch::special_i0(self);
467
+ }
468
+
469
+ inline Tensor& i0_out(Tensor& result, const Tensor& self) {
470
+ return torch::special_i0_out(result, self);
471
+ }
472
+
473
+ /// Computes the area under the standard Gaussian probability density function,
474
+ /// integrated from minus infinity to :attr:`input`, elementwise
475
+ /// See https://pytorch.org/docs/master/special.html#torch.special.ndtr
476
+ ///
477
+ /// Example:
478
+ /// ```
479
+ /// auto t = torch::randn(128, dtype=kDouble);
480
+ /// torch::special::ndtr(t);
481
+ /// ```
482
+ inline Tensor ndtr(const Tensor& self) {
483
+ return torch::special_ndtr(self);
484
+ }
485
+
486
+ inline Tensor& ndtr_out(Tensor& result, const Tensor& self) {
487
+ return torch::special_ndtr_out(result, self);
488
+ }
489
+
490
+ /// Computes the exponentially scaled zeroth order modified Bessel function of
491
+ /// the first kind See
492
+ /// https://pytorch.org/docs/master/special.html#torch.special.i0e.
493
+ ///
494
+ /// Example:
495
+ /// ```
496
+ /// auto t = torch::randn(128, dtype=kDouble);
497
+ /// torch::special::i0e(t);
498
+ /// ```
499
+ inline Tensor i0e(const Tensor& self) {
500
+ return torch::special_i0e(self);
501
+ }
502
+
503
+ inline Tensor& i0e_out(Tensor& result, const Tensor& self) {
504
+ return torch::special_i0e_out(result, self);
505
+ }
506
+
507
+ /// Computes the first order modified Bessel function of the first kind
508
+ /// See https://pytorch.org/docs/master/special.html#torch.special.i1.
509
+ ///
510
+ /// Example:
511
+ /// ```
512
+ /// auto t = torch::randn(128, dtype=kDouble);
513
+ /// torch::special::i1(t);
514
+ /// ```
515
+ inline Tensor i1(const Tensor& self) {
516
+ return torch::special_i1(self);
517
+ }
518
+
519
+ inline Tensor& i1_out(Tensor& result, const Tensor& self) {
520
+ return torch::special_i1_out(result, self);
521
+ }
522
+
523
+ /// Computes the exponentially scaled first order modified Bessel function of
524
+ /// the first kind See
525
+ /// https://pytorch.org/docs/master/special.html#torch.special.i1e.
526
+ ///
527
+ /// Example:
528
+ /// ```
529
+ /// auto t = torch::randn(128, dtype=kDouble);
530
+ /// torch::special::i1e(t);
531
+ /// ```
532
+ inline Tensor i1e(const Tensor& self) {
533
+ return torch::special_i1e(self);
534
+ }
535
+
536
+ inline Tensor& i1e_out(Tensor& result, const Tensor& self) {
537
+ return torch::special_i1e_out(result, self);
538
+ }
539
+
540
+ /// Computes the sinc of input, elementwise
541
+ /// See https://pytorch.org/docs/master/special.html#torch.special.sinc.
542
+ ///
543
+ /// Example:
544
+ /// ```
545
+ /// auto t = torch::randn(128, dtype=kDouble);
546
+ /// torch::special::sinc(t);
547
+ /// ```
548
+ inline Tensor sinc(const Tensor& self) {
549
+ return torch::special_sinc(self);
550
+ }
551
+
552
+ inline Tensor& sinc_out(Tensor& result, const Tensor& self) {
553
+ return torch::special_sinc_out(result, self);
554
+ }
555
+
556
+ /// Rounds the elements of the input
557
+ /// See https://pytorch.org/docs/master/special.html#torch.special.round.
558
+ ///
559
+ /// Example:
560
+ /// ```
561
+ /// auto t = torch::randn(128, dtype=kDouble);
562
+ /// torch::special::round(t);
563
+ /// ```
564
+ inline Tensor round(const Tensor& self) {
565
+ return torch::special_round(self);
566
+ }
567
+
568
+ inline Tensor& round_out(Tensor& result, const Tensor& self) {
569
+ return torch::special_round_out(result, self);
570
+ }
571
+
572
+ /// Computes log(1 + x) of the input, elementwise
573
+ /// See https://pytorch.org/docs/master/special.html#torch.special.log1p.
574
+ ///
575
+ /// Example:
576
+ /// ```
577
+ /// auto t = torch::randn(128, dtype=kDouble);
578
+ /// torch::special::log1p(t);
579
+ /// ```
580
+ inline Tensor log1p(const Tensor& self) {
581
+ return torch::special_log1p(self);
582
+ }
583
+
584
+ inline Tensor& log1p_out(Tensor& result, const Tensor& self) {
585
+ return torch::special_log1p_out(result, self);
586
+ }
587
+
588
+ /// Computes log followed by softmax(x) of the input
589
+ /// See https://pytorch.org/docs/master/special.html#torch.special.log_softmax.
590
+ ///
591
+ /// Example:
592
+ /// ```
593
+ /// auto t = torch::randn(128, 128, dtype=kDouble);
594
+ /// torch::special::log_softmax(t, 0);
595
+ /// ```
596
+ inline Tensor log_softmax(
597
+ const Tensor& self,
598
+ int64_t dim,
599
+ c10::optional<ScalarType> dtype) {
600
+ return torch::special_log_softmax(self, dim, dtype);
601
+ }
602
+
603
+ /// Computes softmax of the input along a given dimension
604
+ /// See https://pytorch.org/docs/master/special.html#torch.special.softmax.
605
+ ///
606
+ /// Example:
607
+ /// ```
608
+ /// auto t = torch::randn(128, 128, dtype=kDouble);
609
+ /// torch::special::softmax(t, 0);
610
+ /// ```
611
+ inline Tensor softmax(
612
+ const Tensor& self,
613
+ int64_t dim,
614
+ c10::optional<ScalarType> dtype) {
615
+ return torch::special_softmax(self, dim, dtype);
616
+ }
617
+
618
+ /// Airy function Ai.
619
+ ///
620
+ /// See https://pytorch.org/docs/master/special.html#torch.special.airy_ai.
621
+ ///
622
+ /// Example:
623
+ ///
624
+ /// ```
625
+ /// auto x = torch::randn(128, dtype=kDouble);
626
+ ///
627
+ /// torch::special::airy_ai(x);
628
+ /// ```
629
+ inline Tensor airy_ai(const Tensor& x) {
630
+ return torch::special_airy_ai(x);
631
+ }
632
+
633
+ inline Tensor& airy_ai_out(Tensor& y, const Tensor& x) {
634
+ return torch::special_airy_ai_out(y, x);
635
+ }
636
+
637
+ /// Bessel function of the first kind of order 0.
638
+ ///
639
+ /// See https://pytorch.org/docs/master/special.html#torch.special.bessel_j0.
640
+ ///
641
+ /// Example:
642
+ ///
643
+ /// ```
644
+ /// auto x = torch::randn(128, dtype=kDouble);
645
+ ///
646
+ /// torch::special::bessel_j0(x);
647
+ /// ```
648
+ inline Tensor bessel_j0(const Tensor& self) {
649
+ return torch::special_bessel_j0(self);
650
+ }
651
+
652
+ inline Tensor& bessel_j0_out(Tensor& result, const Tensor& self) {
653
+ return torch::special_bessel_j0_out(result, self);
654
+ }
655
+
656
+ /// Bessel function of the first kind of order 1.
657
+ ///
658
+ /// See https://pytorch.org/docs/master/special.html#torch.special.bessel_j1.
659
+ ///
660
+ /// Example:
661
+ ///
662
+ /// ```
663
+ /// auto x = torch::randn(128, dtype=kDouble);
664
+ ///
665
+ /// torch::special::bessel_j1(x);
666
+ /// ```
667
+ inline Tensor bessel_j1(const Tensor& self) {
668
+ return torch::special_bessel_j1(self);
669
+ }
670
+
671
+ inline Tensor& bessel_j1_out(Tensor& result, const Tensor& self) {
672
+ return torch::special_bessel_j1_out(result, self);
673
+ }
674
+
675
+ /// Bessel function of the second kind of order 0.
676
+ ///
677
+ /// See https://pytorch.org/docs/master/special.html#torch.special.bessel_y0.
678
+ ///
679
+ /// Example:
680
+ ///
681
+ /// ```
682
+ /// auto x = torch::randn(128, dtype=kDouble);
683
+ ///
684
+ /// torch::special::bessel_y0(x);
685
+ /// ```
686
+ inline Tensor bessel_y0(const Tensor& self) {
687
+ return torch::special_bessel_y0(self);
688
+ }
689
+
690
+ inline Tensor& bessel_y0_out(Tensor& result, const Tensor& self) {
691
+ return torch::special_bessel_y0_out(result, self);
692
+ }
693
+
694
+ /// Bessel function of the second kind of order 1.
695
+ ///
696
+ /// See https://pytorch.org/docs/master/special.html#torch.special.bessel_y1.
697
+ ///
698
+ /// Example:
699
+ ///
700
+ /// ```
701
+ /// auto x = torch::randn(128, dtype=kDouble);
702
+ ///
703
+ /// torch::special::bessel_y1(x);
704
+ /// ```
705
+ inline Tensor bessel_y1(const Tensor& self) {
706
+ return torch::special_bessel_y1(self);
707
+ }
708
+
709
+ inline Tensor& bessel_y1_out(Tensor& result, const Tensor& self) {
710
+ return torch::special_bessel_y1_out(result, self);
711
+ }
712
+
713
+ /// Chebyshev polynomial of the first kind.
714
+ ///
715
+ /// See
716
+ /// https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_t.
717
+ ///
718
+ /// Example:
719
+ ///
720
+ /// ```
721
+ /// auto x = torch::randn(128, dtype=kDouble);
722
+ /// auto n = torch::randn(128, dtype=kDouble);
723
+ ///
724
+ /// torch::special::chebyshev_polynomial_t(x, n);
725
+ /// ```
726
+ inline Tensor chebyshev_polynomial_t(const Tensor& x, const Tensor& n) {
727
+ return torch::special_chebyshev_polynomial_t(x, n);
728
+ }
729
+
730
+ inline Tensor chebyshev_polynomial_t(const Scalar& x, const Tensor& n) {
731
+ return torch::special_chebyshev_polynomial_t(x, n);
732
+ }
733
+
734
+ inline Tensor chebyshev_polynomial_t(const Tensor& x, const Scalar& n) {
735
+ return torch::special_chebyshev_polynomial_t(x, n);
736
+ }
737
+
738
+ inline Tensor& chebyshev_polynomial_t_out(
739
+ Tensor& output,
740
+ const Tensor& x,
741
+ const Tensor& n) {
742
+ return torch::special_chebyshev_polynomial_t_out(output, x, n);
743
+ }
744
+
745
+ inline Tensor& chebyshev_polynomial_t_out(
746
+ Tensor& output,
747
+ const Scalar& x,
748
+ const Tensor& n) {
749
+ return torch::special_chebyshev_polynomial_t_out(output, x, n);
750
+ }
751
+
752
+ inline Tensor& chebyshev_polynomial_t_out(
753
+ Tensor& output,
754
+ const Tensor& x,
755
+ const Scalar& n) {
756
+ return torch::special_chebyshev_polynomial_t_out(output, x, n);
757
+ }
758
+
759
+ /// Chebyshev polynomial of the second kind.
760
+ ///
761
+ /// See
762
+ /// https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_u.
763
+ ///
764
+ /// Example:
765
+ ///
766
+ /// ```
767
+ /// auto x = torch::randn(128, dtype=kDouble);
768
+ /// auto n = torch::randn(128, dtype=kDouble);
769
+ ///
770
+ /// torch::special::chebyshev_polynomial_u(x, n);
771
+ /// ```
772
+ inline Tensor chebyshev_polynomial_u(const Tensor& x, const Tensor& n) {
773
+ return torch::special_chebyshev_polynomial_u(x, n);
774
+ }
775
+
776
+ inline Tensor chebyshev_polynomial_u(const Scalar& x, const Tensor& n) {
777
+ return torch::special_chebyshev_polynomial_u(x, n);
778
+ }
779
+
780
+ inline Tensor chebyshev_polynomial_u(const Tensor& x, const Scalar& n) {
781
+ return torch::special_chebyshev_polynomial_u(x, n);
782
+ }
783
+
784
+ inline Tensor& chebyshev_polynomial_u_out(
785
+ Tensor& output,
786
+ const Tensor& x,
787
+ const Tensor& n) {
788
+ return torch::special_chebyshev_polynomial_u_out(output, x, n);
789
+ }
790
+
791
+ inline Tensor& chebyshev_polynomial_u_out(
792
+ Tensor& output,
793
+ const Scalar& x,
794
+ const Tensor& n) {
795
+ return torch::special_chebyshev_polynomial_u_out(output, x, n);
796
+ }
797
+
798
+ inline Tensor& chebyshev_polynomial_u_out(
799
+ Tensor& output,
800
+ const Tensor& x,
801
+ const Scalar& n) {
802
+ return torch::special_chebyshev_polynomial_u_out(output, x, n);
803
+ }
804
+
805
+ /// Chebyshev polynomial of the third kind.
806
+ ///
807
+ /// See
808
+ /// https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_v.
809
+ ///
810
+ /// Example:
811
+ ///
812
+ /// ```
813
+ /// auto x = torch::randn(128, dtype=kDouble);
814
+ /// auto n = torch::randn(128, dtype=kDouble);
815
+ ///
816
+ /// torch::special::chebyshev_polynomial_v(x, n);
817
+ /// ```
818
+ inline Tensor chebyshev_polynomial_v(const Tensor& x, const Tensor& n) {
819
+ return torch::special_chebyshev_polynomial_v(x, n);
820
+ }
821
+
822
+ inline Tensor chebyshev_polynomial_v(const Scalar& x, const Tensor& n) {
823
+ return torch::special_chebyshev_polynomial_v(x, n);
824
+ }
825
+
826
+ inline Tensor chebyshev_polynomial_v(const Tensor& x, const Scalar& n) {
827
+ return torch::special_chebyshev_polynomial_v(x, n);
828
+ }
829
+
830
+ inline Tensor& chebyshev_polynomial_v_out(
831
+ Tensor& output,
832
+ const Tensor& x,
833
+ const Tensor& n) {
834
+ return torch::special_chebyshev_polynomial_v_out(output, x, n);
835
+ }
836
+
837
+ inline Tensor& chebyshev_polynomial_v_out(
838
+ Tensor& output,
839
+ const Scalar& x,
840
+ const Tensor& n) {
841
+ return torch::special_chebyshev_polynomial_v_out(output, x, n);
842
+ }
843
+
844
+ inline Tensor& chebyshev_polynomial_v_out(
845
+ Tensor& output,
846
+ const Tensor& x,
847
+ const Scalar& n) {
848
+ return torch::special_chebyshev_polynomial_v_out(output, x, n);
849
+ }
850
+
851
+ /// Chebyshev polynomial of the fourth kind.
852
+ ///
853
+ /// See
854
+ /// https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_w.
855
+ ///
856
+ /// Example:
857
+ ///
858
+ /// ```
859
+ /// auto x = torch::randn(128, dtype=kDouble);
860
+ /// auto n = torch::randn(128, dtype=kDouble);
861
+ ///
862
+ /// torch::special::chebyshev_polynomial_w(x, n);
863
+ /// ```
864
+ inline Tensor chebyshev_polynomial_w(const Tensor& x, const Tensor& n) {
865
+ return torch::special_chebyshev_polynomial_w(x, n);
866
+ }
867
+
868
+ inline Tensor chebyshev_polynomial_w(const Scalar& x, const Tensor& n) {
869
+ return torch::special_chebyshev_polynomial_w(x, n);
870
+ }
871
+
872
+ inline Tensor chebyshev_polynomial_w(const Tensor& x, const Scalar& n) {
873
+ return torch::special_chebyshev_polynomial_w(x, n);
874
+ }
875
+
876
+ inline Tensor& chebyshev_polynomial_w_out(
877
+ Tensor& output,
878
+ const Tensor& x,
879
+ const Tensor& n) {
880
+ return torch::special_chebyshev_polynomial_w_out(output, x, n);
881
+ }
882
+
883
+ inline Tensor& chebyshev_polynomial_w_out(
884
+ Tensor& output,
885
+ const Scalar& x,
886
+ const Tensor& n) {
887
+ return torch::special_chebyshev_polynomial_w_out(output, x, n);
888
+ }
889
+
890
+ inline Tensor& chebyshev_polynomial_w_out(
891
+ Tensor& output,
892
+ const Tensor& x,
893
+ const Scalar& n) {
894
+ return torch::special_chebyshev_polynomial_w_out(output, x, n);
895
+ }
896
+
897
+ /// Physicist’s Hermite polynomial.
898
+ ///
899
+ /// See
900
+ /// https://pytorch.org/docs/master/special.html#torch.special.hermite_polynomial_h.
901
+ ///
902
+ /// Example:
903
+ ///
904
+ /// ```
905
+ /// auto x = torch::randn(128, dtype=kDouble);
906
+ /// auto n = torch::randn(128, dtype=kDouble);
907
+ ///
908
+ /// torch::special::hermite_polynomial_h(x, n);
909
+ /// ```
910
+ inline Tensor hermite_polynomial_h(const Tensor& x, const Tensor& n) {
911
+ return torch::special_hermite_polynomial_h(x, n);
912
+ }
913
+
914
+ inline Tensor hermite_polynomial_h(const Scalar& x, const Tensor& n) {
915
+ return torch::special_hermite_polynomial_h(x, n);
916
+ }
917
+
918
+ inline Tensor hermite_polynomial_h(const Tensor& x, const Scalar& n) {
919
+ return torch::special_hermite_polynomial_h(x, n);
920
+ }
921
+
922
+ inline Tensor& hermite_polynomial_h_out(
923
+ Tensor& output,
924
+ const Tensor& x,
925
+ const Tensor& n) {
926
+ return torch::special_hermite_polynomial_h_out(output, x, n);
927
+ }
928
+
929
+ inline Tensor& hermite_polynomial_h_out(
930
+ Tensor& output,
931
+ const Scalar& x,
932
+ const Tensor& n) {
933
+ return torch::special_hermite_polynomial_h_out(output, x, n);
934
+ }
935
+
936
+ inline Tensor& hermite_polynomial_h_out(
937
+ Tensor& output,
938
+ const Tensor& x,
939
+ const Scalar& n) {
940
+ return torch::special_hermite_polynomial_h_out(output, x, n);
941
+ }
942
+
943
+ /// Probabilist’s Hermite polynomial.
944
+ ///
945
+ /// See
946
+ /// https://pytorch.org/docs/master/special.html#torch.special.hermite_polynomial_he.
947
+ ///
948
+ /// Example:
949
+ ///
950
+ /// ```
951
+ /// auto x = torch::randn(128, dtype=kDouble);
952
+ /// auto n = torch::randn(128, dtype=kDouble);
953
+ ///
954
+ /// torch::special::hermite_polynomial_he(x, n);
955
+ /// ```
956
+ inline Tensor hermite_polynomial_he(const Tensor& x, const Tensor& n) {
957
+ return torch::special_hermite_polynomial_he(x, n);
958
+ }
959
+
960
+ inline Tensor hermite_polynomial_he(const Scalar& x, const Tensor& n) {
961
+ return torch::special_hermite_polynomial_he(x, n);
962
+ }
963
+
964
+ inline Tensor hermite_polynomial_he(const Tensor& x, const Scalar& n) {
965
+ return torch::special_hermite_polynomial_he(x, n);
966
+ }
967
+
968
+ inline Tensor& hermite_polynomial_he_out(
969
+ Tensor& output,
970
+ const Tensor& x,
971
+ const Tensor& n) {
972
+ return torch::special_hermite_polynomial_he_out(output, x, n);
973
+ }
974
+
975
+ inline Tensor& hermite_polynomial_he_out(
976
+ Tensor& output,
977
+ const Scalar& x,
978
+ const Tensor& n) {
979
+ return torch::special_hermite_polynomial_he_out(output, x, n);
980
+ }
981
+
982
+ inline Tensor& hermite_polynomial_he_out(
983
+ Tensor& output,
984
+ const Tensor& x,
985
+ const Scalar& n) {
986
+ return torch::special_hermite_polynomial_he_out(output, x, n);
987
+ }
988
+
989
+ /// Laguerre polynomial.
990
+ ///
991
+ /// See
992
+ /// https://pytorch.org/docs/master/special.html#torch.special.laguerre_polynomial_l.
993
+ ///
994
+ /// Example:
995
+ ///
996
+ /// ```
997
+ /// auto x = torch::randn(128, dtype=kDouble);
998
+ /// auto n = torch::randn(128, dtype=kDouble);
999
+ ///
1000
+ /// torch::special::laguerre_polynomial_l(x, n);
1001
+ /// ```
1002
+ inline Tensor laguerre_polynomial_l(const Tensor& x, const Tensor& n) {
1003
+ return torch::special_laguerre_polynomial_l(x, n);
1004
+ }
1005
+
1006
+ inline Tensor laguerre_polynomial_l(const Scalar& x, const Tensor& n) {
1007
+ return torch::special_laguerre_polynomial_l(x, n);
1008
+ }
1009
+
1010
+ inline Tensor laguerre_polynomial_l(const Tensor& x, const Scalar& n) {
1011
+ return torch::special_laguerre_polynomial_l(x, n);
1012
+ }
1013
+
1014
+ inline Tensor& laguerre_polynomial_l_out(
1015
+ Tensor& output,
1016
+ const Tensor& x,
1017
+ const Tensor& n) {
1018
+ return torch::special_laguerre_polynomial_l_out(output, x, n);
1019
+ }
1020
+
1021
+ inline Tensor& laguerre_polynomial_l_out(
1022
+ Tensor& output,
1023
+ const Scalar& x,
1024
+ const Tensor& n) {
1025
+ return torch::special_laguerre_polynomial_l_out(output, x, n);
1026
+ }
1027
+
1028
+ inline Tensor& laguerre_polynomial_l_out(
1029
+ Tensor& output,
1030
+ const Tensor& x,
1031
+ const Scalar& n) {
1032
+ return torch::special_laguerre_polynomial_l_out(output, x, n);
1033
+ }
1034
+
1035
+ /// Legendre polynomial.
1036
+ ///
1037
+ /// See
1038
+ /// https://pytorch.org/docs/master/special.html#torch.special.legendre_polynomial_p.
1039
+ ///
1040
+ /// Example:
1041
+ ///
1042
+ /// ```
1043
+ /// auto x = torch::randn(128, dtype=kDouble);
1044
+ /// auto n = torch::randn(128, dtype=kDouble);
1045
+ ///
1046
+ /// torch::special::legendre_polynomial_p(x, n);
1047
+ /// ```
1048
+ inline Tensor legendre_polynomial_p(const Tensor& x, const Tensor& n) {
1049
+ return torch::special_legendre_polynomial_p(x, n);
1050
+ }
1051
+
1052
+ inline Tensor legendre_polynomial_p(const Scalar& x, const Tensor& n) {
1053
+ return torch::special_legendre_polynomial_p(x, n);
1054
+ }
1055
+
1056
+ inline Tensor legendre_polynomial_p(const Tensor& x, const Scalar& n) {
1057
+ return torch::special_legendre_polynomial_p(x, n);
1058
+ }
1059
+
1060
+ inline Tensor& legendre_polynomial_p_out(
1061
+ Tensor& output,
1062
+ const Tensor& x,
1063
+ const Tensor& n) {
1064
+ return torch::special_legendre_polynomial_p_out(output, x, n);
1065
+ }
1066
+
1067
+ inline Tensor& legendre_polynomial_p_out(
1068
+ Tensor& output,
1069
+ const Scalar& x,
1070
+ const Tensor& n) {
1071
+ return torch::special_legendre_polynomial_p_out(output, x, n);
1072
+ }
1073
+
1074
+ inline Tensor& legendre_polynomial_p_out(
1075
+ Tensor& output,
1076
+ const Tensor& x,
1077
+ const Scalar& n) {
1078
+ return torch::special_legendre_polynomial_p_out(output, x, n);
1079
+ }
1080
+
1081
+ /// Modified Bessel function of the first kind of order 0.
1082
+ ///
1083
+ /// See
1084
+ /// https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_i0.
1085
+ ///
1086
+ /// Example:
1087
+ ///
1088
+ /// ```
1089
+ /// auto x = torch::randn(128, dtype=kDouble);
1090
+ ///
1091
+ /// torch::special::modified_bessel_i0(x);
1092
+ /// ```
1093
+ inline Tensor modified_bessel_i0(const Tensor& self) {
1094
+ return torch::special_modified_bessel_i0(self);
1095
+ }
1096
+
1097
+ inline Tensor& modified_bessel_i0_out(Tensor& result, const Tensor& self) {
1098
+ return torch::special_modified_bessel_i0_out(result, self);
1099
+ }
1100
+
1101
+ /// Modified Bessel function of the first kind of order 1.
1102
+ ///
1103
+ /// See
1104
+ /// https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_i1.
1105
+ ///
1106
+ /// Example:
1107
+ ///
1108
+ /// ```
1109
+ /// auto x = torch::randn(128, dtype=kDouble);
1110
+ ///
1111
+ /// torch::special::modified_bessel_i1(x);
1112
+ /// ```
1113
+ inline Tensor modified_bessel_i1(const Tensor& self) {
1114
+ return torch::special_modified_bessel_i1(self);
1115
+ }
1116
+
1117
+ inline Tensor& modified_bessel_i1_out(Tensor& result, const Tensor& self) {
1118
+ return torch::special_modified_bessel_i1_out(result, self);
1119
+ }
1120
+
1121
+ /// Modified Bessel function of the second kind of order 0.
1122
+ ///
1123
+ /// See
1124
+ /// https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_k0.
1125
+ ///
1126
+ /// Example:
1127
+ ///
1128
+ /// ```
1129
+ /// auto x = torch::randn(128, dtype=kDouble);
1130
+ ///
1131
+ /// torch::special::modified_bessel_k0(x);
1132
+ /// ```
1133
+ inline Tensor modified_bessel_k0(const Tensor& self) {
1134
+ return torch::special_modified_bessel_k0(self);
1135
+ }
1136
+
1137
+ inline Tensor& modified_bessel_k0_out(Tensor& result, const Tensor& self) {
1138
+ return torch::special_modified_bessel_k0_out(result, self);
1139
+ }
1140
+
1141
+ /// Modified Bessel function of the second kind of order 1.
1142
+ ///
1143
+ /// See
1144
+ /// https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_k1.
1145
+ ///
1146
+ /// Example:
1147
+ ///
1148
+ /// ```
1149
+ /// auto x = torch::randn(128, dtype=kDouble);
1150
+ ///
1151
+ /// torch::special::modified_bessel_k1(x);
1152
+ /// ```
1153
+ inline Tensor modified_bessel_k1(const Tensor& self) {
1154
+ return torch::special_modified_bessel_k1(self);
1155
+ }
1156
+
1157
+ inline Tensor& modified_bessel_k1_out(Tensor& result, const Tensor& self) {
1158
+ return torch::special_modified_bessel_k1_out(result, self);
1159
+ }
1160
+
1161
+ /// Scaled modified Bessel function of the second kind of order 0.
1162
+ ///
1163
+ /// See
1164
+ /// https://pytorch.org/docs/master/special.html#torch.special.scaled_modified_bessel_k0.
1165
+ ///
1166
+ /// Example:
1167
+ ///
1168
+ /// ```
1169
+ /// auto x = torch::randn(128, dtype=kDouble);
1170
+ ///
1171
+ /// torch::special::scaled_modified_bessel_k0(x);
1172
+ /// ```
1173
+ inline Tensor scaled_modified_bessel_k0(const Tensor& x) {
1174
+ return torch::special_scaled_modified_bessel_k0(x);
1175
+ }
1176
+
1177
+ inline Tensor& scaled_modified_bessel_k0_out(Tensor& y, const Tensor& x) {
1178
+ return torch::special_scaled_modified_bessel_k0_out(y, x);
1179
+ }
1180
+
1181
+ /// Scaled modified Bessel function of the second kind of order 1.
1182
+ ///
1183
+ /// See
1184
+ /// https://pytorch.org/docs/master/special.html#torch.special.scaled_modified_bessel_k1.
1185
+ ///
1186
+ /// Example:
1187
+ ///
1188
+ /// ```
1189
+ /// auto x = torch::randn(128, dtype=kDouble);
1190
+ ///
1191
+ /// torch::special::scaled_modified_bessel_k1(x);
1192
+ /// ```
1193
+ inline Tensor scaled_modified_bessel_k1(const Tensor& x) {
1194
+ return torch::special_scaled_modified_bessel_k1(x);
1195
+ }
1196
+
1197
+ inline Tensor& scaled_modified_bessel_k1_out(Tensor& y, const Tensor& x) {
1198
+ return torch::special_scaled_modified_bessel_k1_out(y, x);
1199
+ }
1200
+
1201
+ /// Shifted Chebyshev polynomial of the first kind.
1202
+ ///
1203
+ /// See
1204
+ /// https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_t.
1205
+ ///
1206
+ /// Example:
1207
+ ///
1208
+ /// ```
1209
+ /// auto x = torch::randn(128, dtype=kDouble);
1210
+ /// auto n = torch::randn(128, dtype=kDouble);
1211
+ ///
1212
+ /// torch::special::shifted_chebyshev_polynomial_t(x, n);
1213
+ /// ```
1214
+ inline Tensor shifted_chebyshev_polynomial_t(const Tensor& x, const Tensor& n) {
1215
+ return torch::special_shifted_chebyshev_polynomial_t(x, n);
1216
+ }
1217
+
1218
+ inline Tensor shifted_chebyshev_polynomial_t(const Scalar& x, const Tensor& n) {
1219
+ return torch::special_shifted_chebyshev_polynomial_t(x, n);
1220
+ }
1221
+
1222
+ inline Tensor shifted_chebyshev_polynomial_t(const Tensor& x, const Scalar& n) {
1223
+ return torch::special_shifted_chebyshev_polynomial_t(x, n);
1224
+ }
1225
+
1226
+ inline Tensor& shifted_chebyshev_polynomial_t_out(
1227
+ Tensor& output,
1228
+ const Tensor& x,
1229
+ const Tensor& n) {
1230
+ return torch::special_shifted_chebyshev_polynomial_t_out(output, x, n);
1231
+ }
1232
+
1233
+ inline Tensor& shifted_chebyshev_polynomial_t_out(
1234
+ Tensor& output,
1235
+ const Scalar& x,
1236
+ const Tensor& n) {
1237
+ return torch::special_shifted_chebyshev_polynomial_t_out(output, x, n);
1238
+ }
1239
+
1240
+ inline Tensor& shifted_chebyshev_polynomial_t_out(
1241
+ Tensor& output,
1242
+ const Tensor& x,
1243
+ const Scalar& n) {
1244
+ return torch::special_shifted_chebyshev_polynomial_t_out(output, x, n);
1245
+ }
1246
+
1247
+ /// Shifted Chebyshev polynomial of the second kind.
1248
+ ///
1249
+ /// See
1250
+ /// https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_u.
1251
+ ///
1252
+ /// Example:
1253
+ ///
1254
+ /// ```
1255
+ /// auto x = torch::randn(128, dtype=kDouble);
1256
+ /// auto n = torch::randn(128, dtype=kDouble);
1257
+ ///
1258
+ /// torch::special::shifted_chebyshev_polynomial_u(x, n);
1259
+ /// ```
1260
+ inline Tensor shifted_chebyshev_polynomial_u(const Tensor& x, const Tensor& n) {
1261
+ return torch::special_shifted_chebyshev_polynomial_u(x, n);
1262
+ }
1263
+
1264
+ inline Tensor shifted_chebyshev_polynomial_u(const Scalar& x, const Tensor& n) {
1265
+ return torch::special_shifted_chebyshev_polynomial_u(x, n);
1266
+ }
1267
+
1268
+ inline Tensor shifted_chebyshev_polynomial_u(const Tensor& x, const Scalar& n) {
1269
+ return torch::special_shifted_chebyshev_polynomial_u(x, n);
1270
+ }
1271
+
1272
+ inline Tensor& shifted_chebyshev_polynomial_u_out(
1273
+ Tensor& output,
1274
+ const Tensor& x,
1275
+ const Tensor& n) {
1276
+ return torch::special_shifted_chebyshev_polynomial_u_out(output, x, n);
1277
+ }
1278
+
1279
+ inline Tensor& shifted_chebyshev_polynomial_u_out(
1280
+ Tensor& output,
1281
+ const Scalar& x,
1282
+ const Tensor& n) {
1283
+ return torch::special_shifted_chebyshev_polynomial_u_out(output, x, n);
1284
+ }
1285
+
1286
+ inline Tensor& shifted_chebyshev_polynomial_u_out(
1287
+ Tensor& output,
1288
+ const Tensor& x,
1289
+ const Scalar& n) {
1290
+ return torch::special_shifted_chebyshev_polynomial_u_out(output, x, n);
1291
+ }
1292
+
1293
+ /// Shifted Chebyshev polynomial of the third kind.
1294
+ ///
1295
+ /// See
1296
+ /// https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_v.
1297
+ ///
1298
+ /// Example:
1299
+ ///
1300
+ /// ```
1301
+ /// auto x = torch::randn(128, dtype=kDouble);
1302
+ /// auto n = torch::randn(128, dtype=kDouble);
1303
+ ///
1304
+ /// torch::special::shifted_chebyshev_polynomial_v(x, n);
1305
+ /// ```
1306
+ inline Tensor shifted_chebyshev_polynomial_v(const Tensor& x, const Tensor& n) {
1307
+ return torch::special_shifted_chebyshev_polynomial_v(x, n);
1308
+ }
1309
+
1310
+ inline Tensor shifted_chebyshev_polynomial_v(const Scalar& x, const Tensor& n) {
1311
+ return torch::special_shifted_chebyshev_polynomial_v(x, n);
1312
+ }
1313
+
1314
+ inline Tensor shifted_chebyshev_polynomial_v(const Tensor& x, const Scalar& n) {
1315
+ return torch::special_shifted_chebyshev_polynomial_v(x, n);
1316
+ }
1317
+
1318
+ inline Tensor& shifted_chebyshev_polynomial_v_out(
1319
+ Tensor& output,
1320
+ const Tensor& x,
1321
+ const Tensor& n) {
1322
+ return torch::special_shifted_chebyshev_polynomial_v_out(output, x, n);
1323
+ }
1324
+
1325
+ inline Tensor& shifted_chebyshev_polynomial_v_out(
1326
+ Tensor& output,
1327
+ const Scalar& x,
1328
+ const Tensor& n) {
1329
+ return torch::special_shifted_chebyshev_polynomial_v_out(output, x, n);
1330
+ }
1331
+
1332
+ inline Tensor& shifted_chebyshev_polynomial_v_out(
1333
+ Tensor& output,
1334
+ const Tensor& x,
1335
+ const Scalar& n) {
1336
+ return torch::special_shifted_chebyshev_polynomial_v_out(output, x, n);
1337
+ }
1338
+
1339
+ /// Shifted Chebyshev polynomial of the fourth kind.
1340
+ ///
1341
+ /// See
1342
+ /// https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_w.
1343
+ ///
1344
+ /// Example:
1345
+ ///
1346
+ /// ```
1347
+ /// auto x = torch::randn(128, dtype=kDouble);
1348
+ /// auto n = torch::randn(128, dtype=kDouble);
1349
+ ///
1350
+ /// torch::special::shifted_chebyshev_polynomial_w(x, n);
1351
+ /// ```
1352
+ inline Tensor shifted_chebyshev_polynomial_w(const Tensor& x, const Tensor& n) {
1353
+ return torch::special_shifted_chebyshev_polynomial_w(x, n);
1354
+ }
1355
+
1356
+ inline Tensor shifted_chebyshev_polynomial_w(const Scalar& x, const Tensor& n) {
1357
+ return torch::special_shifted_chebyshev_polynomial_w(x, n);
1358
+ }
1359
+
1360
+ inline Tensor shifted_chebyshev_polynomial_w(const Tensor& x, const Scalar& n) {
1361
+ return torch::special_shifted_chebyshev_polynomial_w(x, n);
1362
+ }
1363
+
1364
+ inline Tensor& shifted_chebyshev_polynomial_w_out(
1365
+ Tensor& output,
1366
+ const Tensor& x,
1367
+ const Tensor& n) {
1368
+ return torch::special_shifted_chebyshev_polynomial_w_out(output, x, n);
1369
+ }
1370
+
1371
+ inline Tensor& shifted_chebyshev_polynomial_w_out(
1372
+ Tensor& output,
1373
+ const Scalar& x,
1374
+ const Tensor& n) {
1375
+ return torch::special_shifted_chebyshev_polynomial_w_out(output, x, n);
1376
+ }
1377
+
1378
+ inline Tensor& shifted_chebyshev_polynomial_w_out(
1379
+ Tensor& output,
1380
+ const Tensor& x,
1381
+ const Scalar& n) {
1382
+ return torch::special_shifted_chebyshev_polynomial_w_out(output, x, n);
1383
+ }
1384
+
1385
+ /// Spherical Bessel function of the first kind of order 0.
1386
+ ///
1387
+ /// See
1388
+ /// https://pytorch.org/docs/master/special.html#torch.special.spherical_bessel_j0.
1389
+ ///
1390
+ /// Example:
1391
+ ///
1392
+ /// ```
1393
+ /// auto x = torch::randn(128, dtype=kDouble);
1394
+ ///
1395
+ /// torch::special::spherical_bessel_j0(x);
1396
+ /// ```
1397
+ inline Tensor spherical_bessel_j0(const Tensor& x) {
1398
+ return torch::special_spherical_bessel_j0(x);
1399
+ }
1400
+
1401
+ inline Tensor& spherical_bessel_j0_out(Tensor& y, const Tensor& x) {
1402
+ return torch::special_spherical_bessel_j0_out(y, x);
1403
+ }
1404
+ } // namespace special
1405
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/types.h ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+
5
+ #include <c10/util/Optional.h>
6
+
7
+ #include <torch/csrc/autograd/generated/variable_factories.h>
8
+ #include <torch/csrc/autograd/variable.h>
9
+
10
+ // TODO: These don't really belong here but torchvision builds in CI need them
11
+ // Remove once the torchvision version being compiled in CI is updated
12
+ #include <ATen/core/dispatch/Dispatcher.h>
13
+ #include <torch/library.h>
14
+
15
+ namespace torch {
16
+
17
+ // NOTE [ Exposing declarations in `at::` to `torch::` ]
18
+ //
19
+ // The following line `using namespace at;` is responsible for exposing all
20
+ // declarations in `at::` namespace to `torch::` namespace.
21
+ //
22
+ // According to the rules laid out in
23
+ // https://en.cppreference.com/w/cpp/language/qualified_lookup, section
24
+ // "Namespace members":
25
+ // ```
26
+ // Qualified lookup within the scope of a namespace N first considers all
27
+ // declarations that are located in N and all declarations that are located in
28
+ // the inline namespace members of N (and, transitively, in their inline
29
+ // namespace members). If there are no declarations in that set then it
30
+ // considers declarations in all namespaces named by using-directives found in N
31
+ // and in all transitive inline namespace members of N.
32
+ // ```
33
+ //
34
+ // This means that if both `at::` and `torch::` namespaces have a function with
35
+ // the same signature (e.g. both `at::func()` and `torch::func()` exist), after
36
+ // `namespace torch { using namespace at; }`, when we call `torch::func()`, the
37
+ // `func()` function defined in `torch::` namespace will always be called, and
38
+ // the `func()` function defined in `at::` namespace is always hidden.
39
+ using namespace at; // NOLINT
40
+
41
+ using c10::nullopt;
42
+ using c10::optional;
43
+
44
+ using Dtype = at::ScalarType;
45
+
46
+ /// Fixed width dtypes.
47
+ constexpr auto kUInt8 = at::kByte;
48
+ constexpr auto kInt8 = at::kChar;
49
+ constexpr auto kInt16 = at::kShort;
50
+ constexpr auto kInt32 = at::kInt;
51
+ constexpr auto kInt64 = at::kLong;
52
+ constexpr auto kFloat16 = at::kHalf;
53
+ constexpr auto kFloat32 = at::kFloat;
54
+ constexpr auto kFloat64 = at::kDouble;
55
+
56
+ /// Rust-style short dtypes.
57
+ constexpr auto kU8 = kUInt8;
58
+ constexpr auto kI8 = kInt8;
59
+ constexpr auto kI16 = kInt16;
60
+ constexpr auto kI32 = kInt32;
61
+ constexpr auto kI64 = kInt64;
62
+ constexpr auto kF16 = kFloat16;
63
+ constexpr auto kF32 = kFloat32;
64
+ constexpr auto kF64 = kFloat64;
65
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/utils.h ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/Parallel.h>
4
+ #include <ATen/record_function.h>
5
+ #include <torch/csrc/api/include/torch/types.h>
6
+ #include <torch/csrc/autograd/grad_mode.h>
7
+ #include <torch/csrc/autograd/profiler.h>
8
+ #include <cstdint>
9
+
10
+ namespace torch {
11
+
12
+ /// A RAII, thread-local guard that disabled gradient calculation.
13
+ ///
14
+ /// Disabling gradient calculation is useful for inference, when you are sure
15
+ /// that you will not call `at::Tensor::backward`. It will reduce memory
16
+ /// consumption for computations that would otherwise have `requires_grad() ==
17
+ /// true`.
18
+ ///
19
+ /// In this mode, the result of every computation will have
20
+ /// `requires_grad() == false`, even when the inputs have `requires_grad() ==
21
+ /// true`.
22
+ ///
23
+ /// This context manager is thread-local; it will not affect computation
24
+ /// in other threads.
25
+ ///
26
+ /// Example:
27
+ /// @code
28
+ /// auto x = torch::tensor({1.}, torch::requires_grad());
29
+ /// {
30
+ /// torch::NoGradGuard no_grad;
31
+ /// auto y = x * 2;
32
+ /// std::cout << y.requires_grad() << std::endl; // prints `false`
33
+ /// }
34
+ /// {
35
+ /// auto doubler = [](torch::Tensor x) {
36
+ /// torch::NoGradGuard no_grad;
37
+ /// return x * 2;
38
+ /// };
39
+ /// auto z = doubler(x);
40
+ /// std::cout << z.requires_grad() << std::endl; // prints `false`
41
+ /// }
42
+ /// @endcode
43
+ using NoGradGuard = at::NoGradGuard;
44
+
45
+ /// A RAII, thread-local guard that sets gradient calculation to on or off.
46
+ ///
47
+ /// ``AutoGradMode`` will enable or disable grads based on its argument
48
+ /// `enabled`.
49
+ ///
50
+ /// This context manager is thread-local; it will not affect computation
51
+ /// in other threads.
52
+ ///
53
+ /// \param enabled: Flag whether to enable grad (``true``), or disable
54
+ /// (``false``). This can be used to conditionally enable
55
+ /// gradients.
56
+ ///
57
+ /// Example:
58
+ /// @code
59
+ /// auto x = torch::tensor({1.}, torch::requires_grad());
60
+ /// {
61
+ /// torch::AutoGradMode enable_grad(true);
62
+ /// auto y = x * 2;
63
+ /// std::cout << y.requires_grad() << std::endl; // prints `true`
64
+ /// }
65
+ /// {
66
+ /// torch::AutoGradMode enable_grad(false);
67
+ /// auto y = x * 2;
68
+ /// std::cout << y.requires_grad() << std::endl; // prints `false`
69
+ /// }
70
+ /// @endcode
71
+ using AutoGradMode = at::AutoGradMode;
72
+
73
+ /// Sets the global random seed for all newly created CPU and CUDA tensors.
74
+ using at::manual_seed;
75
+
76
+ // Called during new thread initialization
77
+ using at::init_num_threads;
78
+
79
+ // Returns the number of threads used in parallel region.
80
+ using at::get_num_threads;
81
+
82
+ // Sets the number of threads to be used in parallel region.
83
+ using at::set_num_threads;
84
+
85
+ // Returns the number of threads used for inter-op parallelism.
86
+ using at::get_num_interop_threads;
87
+
88
+ // Sets the number of threads to be used for inter-op parallelism.
89
+ using at::set_num_interop_threads;
90
+
91
+ // Returns true if both t1, t2 are undefined or both are defined and equal
92
+ inline bool equal_if_defined(Tensor t1, Tensor t2) {
93
+ return (
94
+ (!t1.defined() && !t2.defined()) ||
95
+ (t1.defined() && t2.defined() && torch::equal(t1, t2)));
96
+ }
97
+
98
+ // RecordFunction API
99
+ using at::addGlobalCallback;
100
+ using at::addThreadLocalCallback;
101
+ using at::CallbackHandle;
102
+ using at::clearCallbacks;
103
+ using at::clearGlobalCallbacks;
104
+ using at::clearThreadLocalCallbacks;
105
+ using at::DisableRecordFunctionGuard;
106
+ using at::enableRecordFunction;
107
+ using at::hasCallbacks;
108
+ using at::hasGlobalCallbacks;
109
+ using at::hasThreadLocalCallbacks;
110
+ using at::isRecordFunctionEnabled;
111
+ using at::RecordFunction;
112
+ using at::RecordFunctionCallback;
113
+ using at::RecordFunctionGuard;
114
+ using at::removeCallback;
115
+
116
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/version.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ /// Indicates the major version of LibTorch.
4
+ #define TORCH_VERSION_MAJOR 2
5
+
6
+ /// Indicates the minor version of LibTorch.
7
+ #define TORCH_VERSION_MINOR 2
8
+
9
+ /// Indicates the patch version of LibTorch.
10
+ #define TORCH_VERSION_PATCH 0
11
+
12
+ /// Indicates the version of LibTorch.
13
+ #define TORCH_VERSION \
14
+ "2.2.0"
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/alias_info.h>
4
+ #include <c10/util/flat_hash_map.h>
5
+ #include <torch/csrc/jit/ir/ir.h>
6
+ #include <torch/csrc/jit/ir/type_hashing.h>
7
+ #include <torch/csrc/jit/passes/create_functional_graphs.h>
8
+ #include <torch/csrc/jit/passes/utils/memory_dag.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ /**
14
+ * Alias analysis pass.
15
+ *
16
+ * This pass produces an AliasDb that contains aliasing and mutation
17
+ * information about the graph. Users can use this information to determine
18
+ * whether mutations to the graph are safe, i.e. they don't reorder/change
19
+ * nodes in a way that affects output.
20
+ *
21
+ * Every value with a mutable type (Tensors, Lists, Tuples, etc.) will be
22
+ * associated with one or more "alias sets". If two values share an alias set,
23
+ * that means they may alias, implying that a mutation to one value cannot be
24
+ * reordered past a use of the other. Only reordering two reads of an alias set
25
+ * is considered safe.
26
+ *
27
+ * There is a special alias set called the "wildcard set", which indicates that
28
+ * we're not sure what this value may alias. To be conservative, we consider the
29
+ * wildcard alias set as potentially aliasing any other wildcard value within
30
+ * the same type class. Whenever a value becomes contained by another value,
31
+ * such as when a Tensor is appended to a List[Tensor], the contained element
32
+ * becomes part of the wildcard set.
33
+ *
34
+ * Values that contain other mutable types, such as List[Tensor], are
35
+ * initialized as containing the Wildcard set for all contained mutable types.
36
+ *
37
+ * The AliasDb API references the idea of "mutable" vs "immutable"
38
+ * types. "Mutable" means that the object's value can change, while
39
+ * "immutable" means that the value is fixed. (For example, `List` is
40
+ * mutable, so you can add and delete elements from it. On the other
41
+ * hand, you can't modify a Tuple once you create it, making `Tuple` an
42
+ * immutable container.)
43
+ *
44
+ * `isFrozen` - if the Module is frozen then consider attributes as freshly
45
+ * created objects. Freezing API invokes alias analysis to check if they are
46
+ * mutated internally.
47
+ *
48
+ * `descendFunctionCalls` - recursively analyze function and method calls
49
+ * instead of conservative analysis. Generally analysis should be done after
50
+ * inlining so the implmentation for recursive analysis is unoptimized.
51
+ */
52
+ class AliasDb {
53
+ public:
54
+ TORCH_API explicit AliasDb(
55
+ std::shared_ptr<Graph> graphi,
56
+ bool isFrozen = false,
57
+ bool descendFunctionCalls = false);
58
+ TORCH_API ~AliasDb();
59
+
60
+ // There are limitations to what effects the alias analysis can track. Two
61
+ // kinds of nodes may have untracked effects:
62
+ // 1. Nodes that write to a value that may alias the graph inputs (since
63
+ // the inputs can be used outside the graph).
64
+ // 2. Nodes that write to something in the wildcard set.
65
+ //
66
+ // These nodes are considered not safe to eliminate or mutate under any
67
+ // circumstances.
68
+ bool writesToWildcard(Node* n) const;
69
+
70
+ // Does `n` write to an alias of one of the values in `vs`?
71
+ // if `recurseBlocks` is true, consider writes on the nodes in `n`s sub-blocks
72
+ TORCH_API bool writesToAlias(Node* n, const ValueSet& vs) const;
73
+
74
+ // Does `a` and `b` potentially share a memory location or do either
75
+ // hold in memory any element that exists in the other
76
+ TORCH_API bool mayContainAlias(Value* a, Value* b) const;
77
+
78
+ TORCH_API bool mayContainAlias(Value* a, const at::ArrayRef<Value*> b) const;
79
+
80
+ // Do any values in group `a` share a memory location or hold in memory
81
+ // any element that exists in group `b`
82
+ TORCH_API bool mayContainAlias(
83
+ const at::ArrayRef<Value*> a,
84
+ const at::ArrayRef<Value*> b) const;
85
+
86
+ // Do `a` and `b` potentially share a memory location?
87
+ TORCH_API bool mayAlias(const Value* a, const Value* b) const;
88
+ // Do any values in group `a` potentially share a memory location with any
89
+ // value in group `b`? i.e. may they overlap?
90
+ TORCH_API bool mayAlias(const ValueSet& a, const ValueSet& b) const;
91
+
92
+ // Do any nodes write to an alias set input to `n`?
93
+ TORCH_API bool hasInputWriters(const Node* n) const;
94
+
95
+ // Do any nodes write to an alias set output by `n`?
96
+ TORCH_API bool hasOutputWriters(const Node* n) const;
97
+
98
+ // Do any nodes write to an alias set inputed/outputed by `n`?
99
+ TORCH_API bool hasWriters(const Node* n) const;
100
+
101
+ // Do any nodes write to `v`s memory location?
102
+ TORCH_API bool hasWriters(const Value* v) const;
103
+
104
+ // Is the operation in-place? i.e. doesn't write anywhere but locations it
105
+ // reads from.
106
+ TORCH_API bool isMutable(Node* n) const;
107
+
108
+ TORCH_API bool escapesScope(const at::ArrayRef<Value*>& vs) const;
109
+
110
+ // Is it safe to change whether `a` and `b` alias each other ?
111
+ TORCH_API bool safeToChangeAliasingRelationship(
112
+ const at::ArrayRef<Value*>& a,
113
+ const at::ArrayRef<Value*>& b) const;
114
+
115
+ // Move `n` (already in the graph) after `movePoint` in the topological order.
116
+ //
117
+ // Tries to preserve value dependencies, so other nodes might be moved. We
118
+ // make two guarantees about the postcondition of the node list:
119
+ // - `n` is directly after `movePoint`.
120
+ // - only nodes between `n` and `movePoint` have been moved.
121
+ //
122
+ // Returns `false` if it's impossible to move `n` after `MovePoint` without
123
+ // violating dependencies, otherwise executes the move and returns `true`
124
+ TORCH_API bool moveAfterTopologicallyValid(Node* n, Node* movePoint);
125
+ TORCH_API bool moveBeforeTopologicallyValid(Node* n, Node* movePoint);
126
+
127
+ bool couldMoveAfterTopologically(Node* n, Node* movePoint);
128
+ bool couldMoveBeforeTopologically(Node* n, Node* movePoint);
129
+
130
+ // For debugging: print alias db state to stdout
131
+ TORCH_API void dump() const;
132
+ TORCH_API std::string toString() const;
133
+
134
+ // Generates a DOT (www.graphviz.org) graph representation
135
+ //
136
+ // Returns `true` if the output file was successfully generated
137
+ //
138
+ // WARNING: The output dot file path can't include shell specific notations,
139
+ // for example you can't use "~/temp/aliasdb.dot"
140
+ // (instead, use "/home/user/temp/aliasdb.dot")
141
+ //
142
+ TORCH_API bool dumpToGraphvizFile(const char* filename) const;
143
+ TORCH_API std::string toGraphviz() const;
144
+
145
+ // Returns `true` if the given element is mutable or if it is a
146
+ // container type with an internal mutable element (e.g.
147
+ // `Tuple[int, Tensor]` has an internal mutable type `Tensor`, so
148
+ // it would be considered a "mutable type" in AliasDb)
149
+ static bool isMutableType(const Value* v);
150
+ static bool isMutableType(const TypePtr& type);
151
+
152
+ /**
153
+ * Mutation API
154
+ *
155
+ * These methods allow you to update AliasDb in-place if you are performing
156
+ * graph mutation.
157
+ *
158
+ * WARNING: These methods should be considered INTERNAL. They do not perform
159
+ * very many correctness checks, the user is responsible for making sure they
160
+ * are updating AliasDb correctly. `Lint()`ing the AliasDb can help with
161
+ * this.
162
+ */
163
+ // Copy `existing`s aliasing info to `new_value`, and remove `existing`.
164
+ TORCH_API void replaceWithNewValue(Value* existing, Value* new_value);
165
+ // Copy `from`s aliasing info to `to`.
166
+ TORCH_API void copyValue(Value* from, Value* to);
167
+ // Create a new `value` that does not alias anything else.
168
+ TORCH_API void createValue(const Value* value);
169
+
170
+ // Enable more precise treatment of prim::TupleConstruct.
171
+ void enablePreciseTupleContainerAnalysis();
172
+
173
+ friend struct MutationRemover;
174
+
175
+ private:
176
+ // Helper for topologically-safe node moves.
177
+ class WorkingSet;
178
+ enum class MoveSide { BEFORE, AFTER };
179
+ bool tryMove(Node* toMove, Node* movePoint, MoveSide moveSide, bool dryRun);
180
+ void move(Node* toMove, Node* movePoint, MoveSide moveSide);
181
+ bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const;
182
+
183
+ bool isMutableTypeInternal(const Value* v) const;
184
+ bool isMutableTypeInternal(const TypePtr& type) const;
185
+
186
+ /**
187
+ * Write and read internal API
188
+ */
189
+ // Get all the values that `n` writes to.
190
+ // NOTE: this only returns values directly written to, not aliases thereof
191
+ //
192
+ // if `recurseBlocks` is true, gather writes on the nodes in `n`s sub-blocks
193
+ MemoryLocations getWrites(Node* n) const;
194
+ void getWritesImpl(Node* n, MemoryLocations& ret) const;
195
+ // Register the fact that `n` writes to `v`.
196
+ void registerWrite(const Value* v, Node* n, bool writeToContained = false);
197
+ // Get all the values that `n` reads from.
198
+ // if `recurseBlocks` is true, gather reads on the nodes in `n`s sub-blocks
199
+ MemoryLocations getReads(Node* n) const;
200
+ void getReadsImpl(Node* n, MemoryLocations& ret) const;
201
+
202
+ /**
203
+ * Wildcard methods
204
+ */
205
+ // Register `v` as a wildcard value.
206
+ c10::optional<Element*> setWildcard(const Value* v);
207
+
208
+ // Is this a value which will not alias?
209
+ bool nonAliasingValue(const Value* elem) const;
210
+
211
+ /**
212
+ * Special analysis methods
213
+ */
214
+ void analyze(const std::shared_ptr<Graph>& graph);
215
+ void analyze(Block* block);
216
+ void analyze(Node* node);
217
+ void analyzeImpl(Node* node);
218
+ void analyzeIf(Node* node);
219
+ void analyzeLoop(Node* node);
220
+ void analyzeSubgraph(Node* node, std::shared_ptr<Graph> subgraph);
221
+ void analyzeSubgraph(Node* node);
222
+ void analyzeCreator(Node* node);
223
+ void analyzeExtractor(Node* node);
224
+ void analyzeChunk(Node* node);
225
+ void analyzeBroadcastingChunk(Node* node);
226
+ void analyzeFork(Node* node);
227
+ void analyzeWait(Node* node);
228
+ void analyzeAwaitable(Node* node);
229
+ void analyzeAwaitableWait(Node* node);
230
+ void analyzeRpcAsync(Node* node);
231
+ void analyzeBatchNorm(Node* node);
232
+ void analyzeInstanceNorm(Node* node);
233
+ void analyzeGradOf(Node* node);
234
+ void analyzeSetAttr(Node* node);
235
+ void analyzeConservative(Node* node);
236
+ void analyzeContainerConstruct(Node* node);
237
+ bool tryRegisteredAnalysis(Node* node);
238
+
239
+ /**
240
+ * Alias manipulation methods
241
+ */
242
+ void makeAllAlias(const std::vector<Value*>& values);
243
+ void makePointerTo(const Value* value, const Value* to);
244
+ TORCH_API void addToContainedElements(
245
+ const Value* element,
246
+ const Value* container);
247
+ void mapAliases(at::ArrayRef<Value*> to, at::ArrayRef<Value*> from);
248
+ void giveFreshAlias(
249
+ const Value* value,
250
+ bool add_wildcard_to_contained_elems = true);
251
+ Element* getOrCreateElement(const Value* value);
252
+
253
+ const AliasTypeSet* mapTypeToAliasTypeSetPtr(const TypePtr& type) const;
254
+ bool functionalNonEscapingListUse(const Use& use) const;
255
+ bool functionalNonEscapingTupleUse(const Use& use) const;
256
+
257
+ std::shared_ptr<Graph> graph_;
258
+
259
+ // If the Module is frozen then consider attributes as freshly created
260
+ // objects. Freezing API invokes alias analysis to check if they are mutated
261
+ // internally.
262
+ bool isFrozen_;
263
+
264
+ bool descend_function_calls_;
265
+ std::unordered_map<Graph*, std::vector<std::shared_ptr<Graph>>>
266
+ function_call_copies_;
267
+
268
+ // The points-to graph that stores aliasing relationships
269
+ std::unique_ptr<MemoryDAGBuilder> memoryDAGBuilder_;
270
+ std::unique_ptr<MemoryDAG> memoryDAG_;
271
+
272
+ // Mapping of values to MemoryDAG elements
273
+ ska::flat_hash_map<const Value*, Element*> elementMap_;
274
+ // All wildcard Elements (one for each unique mutable type)
275
+ ska::flat_hash_map<TypePtr, Element*, HashType, EqualType> wildcardIndex_;
276
+ Element* getWildcard(const TypePtr& type) const;
277
+ c10::optional<Element*> tryGetOrCreateWildcard(const TypePtr& type);
278
+ void addContainedTypesToFreshElement(
279
+ Element* container_elem,
280
+ const AliasTypeSet& mut_types);
281
+ void pointUnionTypeElementToAllContainedTypes(
282
+ Element* container_elem,
283
+ const AliasTypeSet& mut_types);
284
+
285
+ std::vector<Element*> getElements(at::ArrayRef<Value*> vs) const;
286
+ bool mayAliasWildcard(const Value* v) const;
287
+ bool mayAliasWildcard(const at::ArrayRef<Value*> vs) const;
288
+ bool hasWriters(const at::ArrayRef<Value*>& values) const;
289
+
290
+ // Cached mapping of type ptrs to their mutable types
291
+ mutable ska::flat_hash_map<TypePtr, AliasTypeSet> mapped_mutable_types_;
292
+
293
+ /**
294
+ * State for tracking write info.
295
+ */
296
+ // Write registry where the analysis can record the writes as it sees them.
297
+ // This information is later denormalized into various caches to improve query
298
+ // efficiency.
299
+ struct WriteRegistry;
300
+ std::unique_ptr<WriteRegistry> writeRegistry_;
301
+
302
+ // Map of nodes to the memory locations that they write to
303
+ using TWriteIndex = ska::flat_hash_map<Node*, MemoryLocations>;
304
+ c10::optional<TWriteIndex> writeIndex_;
305
+ // Collection of all memory locations that are written to.
306
+ c10::optional<MemoryLocations> writtenToLocationsIndex_;
307
+ void buildWrittenToLocationsIndex();
308
+
309
+ std::unordered_set<const Value*> wildcards_;
310
+
311
+ std::string getElementName(const Element* e) const;
312
+
313
+ friend void Lint(const AliasDb* db);
314
+ };
315
+
316
+ // Helper check that invariants over AliasDb are maintained.
317
+ // Useful if you are using the AliasDb mutation API and want to check you did
318
+ // the right thing.
319
+ TORCH_API void Lint(const AliasDb* db);
320
+
321
+ } // namespace jit
322
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/attributes.h ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <string>
4
+ #include <vector>
5
+
6
+ #include <ATen/core/jit_type_base.h>
7
+ #include <ATen/core/symbol.h>
8
+
9
+ #include <torch/csrc/Export.h>
10
+
11
+ namespace torch {
12
+ namespace jit {
13
+
14
+ using ::c10::Symbol;
15
+
16
+ constexpr int max_tensor_display_size = 10;
17
+
18
+ enum class AttributeKind {
19
+ f,
20
+ fs,
21
+ c,
22
+ cs,
23
+ i,
24
+ is,
25
+ s,
26
+ ss,
27
+ t,
28
+ ts,
29
+ g,
30
+ gs,
31
+ ty,
32
+ tys,
33
+ ival
34
+ };
35
+ static inline const char* toString(AttributeKind kind) {
36
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
37
+ static const char* names[] = {
38
+ "f",
39
+ "c",
40
+ "cs",
41
+ "fs",
42
+ "i",
43
+ "is",
44
+ "s",
45
+ "ss",
46
+ "t",
47
+ "ts",
48
+ "g",
49
+ "gs",
50
+ "ty",
51
+ "tys",
52
+ "ival"};
53
+ AT_ASSERT(size_t(kind) < sizeof(names) / sizeof(*names));
54
+ return names[int(kind)];
55
+ }
56
+
57
+ struct AttributeValue {
58
+ AttributeValue(Symbol name) : name(name) {}
59
+ using Ptr = std::unique_ptr<AttributeValue>;
60
+ Symbol name;
61
+ virtual AttributeKind kind() const = 0;
62
+ virtual Ptr clone() const = 0;
63
+ virtual ~AttributeValue() = default;
64
+ };
65
+
66
+ template <typename T, AttributeKind Kind>
67
+ struct ScalarAttributeValue : public AttributeValue {
68
+ using ConstructorType = T;
69
+ using ValueType = T;
70
+ ScalarAttributeValue(Symbol name, ConstructorType value_)
71
+ : AttributeValue(name), value_(std::move(value_)) {}
72
+ ValueType& value() {
73
+ return value_;
74
+ }
75
+ Ptr clone() const override {
76
+ return Ptr(new ScalarAttributeValue(name, value_));
77
+ }
78
+ AttributeKind kind() const override {
79
+ return Kind;
80
+ }
81
+
82
+ private:
83
+ ValueType value_;
84
+ };
85
+
86
+ template <typename T, AttributeKind Kind>
87
+ struct VectorAttributeValue : public AttributeValue {
88
+ using ConstructorType = std::vector<T>;
89
+ using ValueType = std::vector<T>;
90
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
91
+ VectorAttributeValue(Symbol name, ConstructorType value_)
92
+ : AttributeValue(name), value_(std::move(value_)) {}
93
+ ValueType& value() {
94
+ return value_;
95
+ }
96
+ AttributeKind kind() const override {
97
+ return Kind;
98
+ }
99
+ std::unique_ptr<AttributeValue> clone() const override {
100
+ auto copy = value_;
101
+ return Ptr(new VectorAttributeValue(name, std::move(copy)));
102
+ }
103
+
104
+ private:
105
+ ValueType value_;
106
+ };
107
+
108
+ using ComplexAttr =
109
+ ScalarAttributeValue<c10::complex<double>, AttributeKind::c>;
110
+ using ComplexValsAttr =
111
+ VectorAttributeValue<c10::complex<double>, AttributeKind::cs>;
112
+ using FloatAttr = ScalarAttributeValue<double, AttributeKind::f>;
113
+ using FloatsAttr = VectorAttributeValue<double, AttributeKind::fs>;
114
+ using IntAttr = ScalarAttributeValue<int64_t, AttributeKind::i>;
115
+ using IntsAttr = VectorAttributeValue<int64_t, AttributeKind::is>;
116
+ using StringAttr = ScalarAttributeValue<std::string, AttributeKind::s>;
117
+ using StringsAttr = VectorAttributeValue<std::string, AttributeKind::ss>;
118
+ using TensorAttr = ScalarAttributeValue<at::Tensor, AttributeKind::t>;
119
+ using TensorsAttr = VectorAttributeValue<at::Tensor, AttributeKind::ts>;
120
+ using TypeAttr = ScalarAttributeValue<c10::TypePtr, AttributeKind::ty>;
121
+ using TypesAttr = VectorAttributeValue<c10::TypePtr, AttributeKind::tys>;
122
+ using IValueAttr = ScalarAttributeValue<at::IValue, AttributeKind::ival>;
123
+
124
+ struct Graph;
125
+
126
+ // We special case Graph attributes like this because we want to ensure that
127
+ // Graph::copy() is called when we clone() these attributes.
128
+ struct TORCH_API GraphAttr : public AttributeValue {
129
+ using ConstructorType = std::shared_ptr<Graph>;
130
+ using ValueType = std::shared_ptr<Graph>;
131
+ GraphAttr(Symbol name, ConstructorType value_)
132
+ : AttributeValue(name), value_(std::move(value_)) {}
133
+ ValueType& value() {
134
+ return value_;
135
+ }
136
+ Ptr clone() const override;
137
+ AttributeKind kind() const override {
138
+ return AttributeKind::g;
139
+ }
140
+
141
+ private:
142
+ std::shared_ptr<Graph> value_;
143
+ };
144
+
145
+ struct TORCH_API GraphsAttr : public AttributeValue {
146
+ using ConstructorType = std::vector<std::shared_ptr<Graph>>;
147
+ using ValueType = std::vector<std::shared_ptr<Graph>>;
148
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
149
+ GraphsAttr(Symbol name, ConstructorType value_)
150
+ : AttributeValue(name), value_(std::move(value_)) {}
151
+ ValueType& value() {
152
+ return value_;
153
+ }
154
+ AttributeKind kind() const override {
155
+ return AttributeKind::gs;
156
+ }
157
+ std::unique_ptr<AttributeValue> clone() const override;
158
+
159
+ private:
160
+ ValueType value_;
161
+ };
162
+
163
+ struct IRAttributeError : public std::exception {
164
+ IRAttributeError(Symbol name, bool defined) {
165
+ std::stringstream ss;
166
+ // NOLINTNEXTLINE(bugprone-branch-clone)
167
+ if (!defined) {
168
+ ss << "required keyword attribute '" << name.toUnqualString()
169
+ << "' is undefined";
170
+ } else {
171
+ ss << "required keyword attribute '" << name.toUnqualString()
172
+ << "' has the wrong type";
173
+ }
174
+ msg = ss.str();
175
+ }
176
+ const char* what() const noexcept override {
177
+ return msg.c_str();
178
+ }
179
+
180
+ private:
181
+ std::string msg;
182
+ };
183
+ } // namespace jit
184
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/constants.h ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <torch/csrc/jit/frontend/source_range.h>
6
+ #include <torch/csrc/jit/ir/scope.h>
7
+
8
+ // helpers for handling constants in the IR
9
+ // - create constant nodes from ints, floats, complex, intlist, Tensors, and
10
+ // other types
11
+ // - implement primitive constant ops.
12
+ namespace torch {
13
+ namespace jit {
14
+
15
+ using ::c10::IValue;
16
+
17
+ struct Graph;
18
+ struct Value;
19
+
20
+ // thrown when insertConstant cannot encode the IValue into a graph
21
+ struct TORCH_API constant_not_supported_error : public std::runtime_error {
22
+ using runtime_error::runtime_error;
23
+ };
24
+
25
+ TORCH_API Value* insertConstant(
26
+ Graph& g,
27
+ const IValue& val,
28
+ c10::optional<SourceRange> loc = c10::nullopt,
29
+ c10::optional<ScopePtr> scope = c10::nullopt);
30
+
31
+ // note: prefer g.insertConsant(val, loc) which does exactly the same thing
32
+ // this function is only declared/defined here because its implementation is
33
+ // closely related to the implementation of prim::Constant that is also in
34
+ // constants.cpp.
35
+ //
36
+ // returns a c10::nullopt if the IValue kind cannot be inserted as a constant
37
+ TORCH_API c10::optional<Value*> tryInsertConstant(
38
+ Graph& g,
39
+ const IValue& val,
40
+ c10::optional<SourceRange> loc = c10::nullopt,
41
+ c10::optional<ScopePtr> scope = c10::nullopt);
42
+
43
+ ////////////////////////////////////////////////////////////////////////////////
44
+ // Helper for retrieving constants
45
+ ////////////////////////////////////////////////////////////////////////////////
46
+
47
+ // attempt to convert a (possibly constant) Value* into an interpreter value
48
+ // (IValue). returns c10::nullopt if the Value* was not constant
49
+ TORCH_API c10::optional<IValue> toIValue(const Value* v);
50
+
51
+ // if a value is a constant then try to turn into type T using the
52
+ // same rules as the interpreter
53
+ template <typename T>
54
+ c10::optional<T> constant_as(const Value* v) {
55
+ if (auto ivalue = toIValue(v)) {
56
+ return ivalue->to<T>();
57
+ }
58
+ return c10::nullopt;
59
+ }
60
+ } // namespace jit
61
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/Exception.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Intrusive doubly linked lists with sane reverse iterators.
9
+ // The header file is named generic_graph_node_list.h because it is ONLY
10
+ // used for Graph's Node lists, and if you want to use it for other
11
+ // things, you will have to do some refactoring.
12
+ //
13
+ // At the moment, the templated type T must support a few operations:
14
+ //
15
+ // - It must have a field: T* next_in_graph[2] = { nullptr, nullptr };
16
+ // which are used for the intrusive linked list pointers.
17
+ //
18
+ // - It must have a method 'destroy()', which removes T from the
19
+ // list and frees a T.
20
+ //
21
+ // In practice, we are only using it with Node and const Node. 'destroy()'
22
+ // needs to be renegotiated if you want to use this somewhere else.
23
+ //
24
+ // Regardless of the iteration direction, iterators always physically point
25
+ // to the element they logically point to, rather than
26
+ // the off-by-one behavior for all standard library reverse iterators like
27
+ // std::list.
28
+
29
+ // The list is includes two sentinel nodes, one at the beginning and one at the
30
+ // end with a circular link between them. It is an error to insert nodes after
31
+ // the end sentinel node but before the beginning node:
32
+
33
+ // Visualization showing only the next() links:
34
+ // HEAD -> first -> second -> ... -> last -> TAIL
35
+ // ^------------------------------------------
36
+
37
+ // Visualization showing only the prev() links:
38
+ // HEAD <- first <- second <- ... <- last <- TAIL
39
+ // ------------------------------------------^
40
+
41
+ static constexpr int kNextDirection = 0;
42
+ static constexpr int kPrevDirection = 1;
43
+
44
+ template <typename T>
45
+ struct generic_graph_node_list;
46
+
47
+ template <typename T>
48
+ struct generic_graph_node_list_iterator;
49
+
50
+ struct Node;
51
+ using graph_node_list = generic_graph_node_list<Node>;
52
+ using const_graph_node_list = generic_graph_node_list<const Node>;
53
+ using graph_node_list_iterator = generic_graph_node_list_iterator<Node>;
54
+ using const_graph_node_list_iterator =
55
+ generic_graph_node_list_iterator<const Node>;
56
+
57
+ template <typename T>
58
+ struct generic_graph_node_list_iterator {
59
+ generic_graph_node_list_iterator() : cur(nullptr), d(kNextDirection) {}
60
+ generic_graph_node_list_iterator(T* cur, int d) : cur(cur), d(d) {}
61
+ generic_graph_node_list_iterator(
62
+ const generic_graph_node_list_iterator& rhs) = default;
63
+ generic_graph_node_list_iterator(
64
+ generic_graph_node_list_iterator&& rhs) noexcept = default;
65
+ generic_graph_node_list_iterator& operator=(
66
+ const generic_graph_node_list_iterator& rhs) = default;
67
+ generic_graph_node_list_iterator& operator=(
68
+ generic_graph_node_list_iterator&& rhs) noexcept = default;
69
+ T* operator*() const {
70
+ return cur;
71
+ }
72
+ T* operator->() const {
73
+ return cur;
74
+ }
75
+ generic_graph_node_list_iterator& operator++() {
76
+ AT_ASSERT(cur);
77
+ cur = cur->next_in_graph[d];
78
+ return *this;
79
+ }
80
+ generic_graph_node_list_iterator operator++(int) {
81
+ generic_graph_node_list_iterator old = *this;
82
+ ++(*this);
83
+ return old;
84
+ }
85
+ generic_graph_node_list_iterator& operator--() {
86
+ AT_ASSERT(cur);
87
+ cur = cur->next_in_graph[reverseDir()];
88
+ return *this;
89
+ }
90
+ generic_graph_node_list_iterator operator--(int) {
91
+ generic_graph_node_list_iterator old = *this;
92
+ --(*this);
93
+ return old;
94
+ }
95
+
96
+ // erase cur without invalidating this iterator
97
+ // named differently from destroy so that ->/. bugs do not
98
+ // silently cause the wrong one to be called.
99
+ // iterator will point to the previous entry after call
100
+ void destroyCurrent() {
101
+ T* n = cur;
102
+ cur = cur->next_in_graph[reverseDir()];
103
+ n->destroy();
104
+ }
105
+ generic_graph_node_list_iterator reverse() {
106
+ return generic_graph_node_list_iterator(cur, reverseDir());
107
+ }
108
+
109
+ private:
110
+ int reverseDir() {
111
+ return d == kNextDirection ? kPrevDirection : kNextDirection;
112
+ }
113
+ T* cur;
114
+ int d; // direction 0 is forward 1 is reverse, see next_in_graph
115
+ };
116
+
117
+ template <typename T>
118
+ struct generic_graph_node_list {
119
+ using iterator = generic_graph_node_list_iterator<T>;
120
+ using const_iterator = generic_graph_node_list_iterator<const T>;
121
+ generic_graph_node_list_iterator<T> begin() {
122
+ return generic_graph_node_list_iterator<T>(head->next_in_graph[d], d);
123
+ }
124
+ generic_graph_node_list_iterator<const T> begin() const {
125
+ return generic_graph_node_list_iterator<const T>(head->next_in_graph[d], d);
126
+ }
127
+ generic_graph_node_list_iterator<T> end() {
128
+ return generic_graph_node_list_iterator<T>(head->next_in_graph[!d], d);
129
+ }
130
+ generic_graph_node_list_iterator<const T> end() const {
131
+ return generic_graph_node_list_iterator<const T>(
132
+ head->next_in_graph[!d], d);
133
+ }
134
+ generic_graph_node_list_iterator<T> rbegin() {
135
+ return reverse().begin();
136
+ }
137
+ generic_graph_node_list_iterator<const T> rbegin() const {
138
+ return reverse().begin();
139
+ }
140
+ generic_graph_node_list_iterator<T> rend() {
141
+ return reverse().end();
142
+ }
143
+ generic_graph_node_list_iterator<const T> rend() const {
144
+ return reverse().end();
145
+ }
146
+ generic_graph_node_list reverse() {
147
+ return generic_graph_node_list(head->next_in_graph[!d], !d);
148
+ }
149
+ const generic_graph_node_list reverse() const {
150
+ return generic_graph_node_list(head->next_in_graph[!d], !d);
151
+ }
152
+ T* front() {
153
+ return head->next_in_graph[d];
154
+ }
155
+ const T* front() const {
156
+ return head->next_in_graph[d];
157
+ }
158
+ T* back() {
159
+ return head->next_in_graph[!d];
160
+ }
161
+ const T* back() const {
162
+ return head->next_in_graph[!d];
163
+ }
164
+ generic_graph_node_list(T* head, int d) : head(head), d(d) {}
165
+
166
+ private:
167
+ T* head; // both head and tail are sentinel nodes
168
+ // the first real node is head->next_in_graph[d]
169
+ // the tail sentinel is head->next_in_graph[!d]
170
+ int d;
171
+ };
172
+
173
+ template <typename T>
174
+ static inline bool operator==(
175
+ generic_graph_node_list_iterator<T> a,
176
+ generic_graph_node_list_iterator<T> b) {
177
+ return *a == *b;
178
+ }
179
+
180
+ template <typename T>
181
+ static inline bool operator!=(
182
+ generic_graph_node_list_iterator<T> a,
183
+ generic_graph_node_list_iterator<T> b) {
184
+ return *a != *b;
185
+ }
186
+
187
+ } // namespace jit
188
+ } // namespace torch
189
+
190
+ namespace std {
191
+
192
+ template <typename T>
193
+ struct iterator_traits<torch::jit::generic_graph_node_list_iterator<T>> {
194
+ using difference_type = int64_t;
195
+ using value_type = T*;
196
+ using pointer = T**;
197
+ using reference = T*&;
198
+ using iterator_category = bidirectional_iterator_tag;
199
+ };
200
+
201
+ } // namespace std
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_utils.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ #include <vector>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ TORCH_API TypePtr getTensorType(const at::Tensor& t, bool complete);
11
+
12
+ TORCH_API TypePtr inferShapeAndTypeForInput(
13
+ TypePtr input_type,
14
+ Stack::const_iterator& s_iter,
15
+ const Stack::const_iterator& s_iter_end,
16
+ bool complete);
17
+
18
+ TORCH_API void setInputTensorTypes(
19
+ Graph& g,
20
+ const Stack& stack,
21
+ bool complete,
22
+ const std::vector<int>& param_count_list = {});
23
+
24
+ } // namespace jit
25
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h ADDED
@@ -0,0 +1,1841 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/attributes.h>
4
+ #include <torch/csrc/jit/ir/graph_node_list.h>
5
+ #include <torch/csrc/jit/ir/named_value.h>
6
+ #include <torch/csrc/jit/ir/scope.h>
7
+ #include <torch/csrc/jit/runtime/operator.h>
8
+
9
+ #include <torch/csrc/Export.h>
10
+ #include <torch/csrc/utils/python_stub.h>
11
+ #include <torch/csrc/utils/schema_info.h>
12
+
13
+ #include <ATen/Utils.h>
14
+ #include <ATen/core/Tensor.h>
15
+ #include <ATen/core/dynamic_type.h>
16
+ #include <ATen/core/enum_type.h>
17
+ #include <ATen/core/functional.h>
18
+ #include <ATen/core/interned_strings.h>
19
+ #include <ATen/core/ivalue.h>
20
+ #include <ATen/core/jit_type.h>
21
+ #include <c10/util/ArrayRef.h>
22
+ #include <c10/util/Exception.h>
23
+ #include <c10/util/Optional.h>
24
+
25
+ #include <functional>
26
+ #include <iosfwd>
27
+ #include <unordered_set>
28
+ #include <vector>
29
+
30
+ // Forward declare, the real meat is in python_ir.cpp
31
+ template <class T>
32
+ class THPPointer;
33
+ using THPObjectPtr = THPPointer<PyObject>;
34
+ using pyobj_list = std::vector<THPObjectPtr>;
35
+
36
+ namespace torch {
37
+ namespace jit {
38
+ namespace utils {
39
+ TORCH_API std::string getNodesModuleHierarchy(const Node& n);
40
+ } // namespace utils
41
+ class AliasDb;
42
+
43
+ using ::c10::Argument;
44
+ using ::c10::FunctionSchema;
45
+ using ::c10::Symbol;
46
+
47
+ using ::c10::ivalue::Shared;
48
+
49
+ using ::c10::IValue;
50
+ using ::c10::ivalue::Future;
51
+
52
+ using ::c10::ivalue::ConstantString;
53
+
54
+ #define C10_USING(T) using ::c10::T;
55
+ C10_FORALL_TYPES(C10_USING)
56
+ #undef C10_USING
57
+
58
+ #define C10_USING(T) using ::c10::T##Ptr;
59
+ C10_FORALL_TYPES(C10_USING)
60
+ #undef C10_USING
61
+
62
+ using ::c10::Type;
63
+ using ::c10::TypeEnv;
64
+ using ::c10::TypePtr;
65
+
66
+ using ::c10::getTypePtr;
67
+ using ::c10::MatchTypeReturn;
68
+ using ::c10::TypeKind;
69
+
70
+ using ::c10::fmap;
71
+
72
+ namespace prim {
73
+ using namespace ::c10::prim;
74
+ }
75
+ namespace attr {
76
+ using namespace ::c10::attr;
77
+ }
78
+ namespace aten {
79
+ using namespace ::c10::aten;
80
+ }
81
+ namespace cuda {
82
+ #if !defined(USE_ROCM)
83
+ using namespace ::c10::cuda;
84
+ #endif
85
+ } // namespace cuda
86
+
87
+ struct Function;
88
+ struct GraphFunction;
89
+ struct MatchedSchema;
90
+
91
+ // A Graph represents one "function" of computation.
92
+ // It uses a simple ownership model where the graph owns all the nodes inside
93
+ // it. All references inside the graph are raw pointers. Destroying the Graph
94
+ // will invalidate any pointers to nodes in the graph.
95
+ struct Graph;
96
+
97
+ // Node is the base class of the IR graph. It represents one computation
98
+ // and dependencies on a list of Values. The "prim-ops", so to speak.
99
+ struct Node;
100
+
101
+ // A Value represents an input or output to node that is either a
102
+ // Tensor or an opaque Handle object, as determined by type().
103
+ struct Value;
104
+
105
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g);
106
+ TORCH_API std::ostream& operator<<(std::ostream& out, const Node& n);
107
+
108
+ // A list of nodes, with inputs and outputs
109
+ struct Block;
110
+
111
+ // Each use is represented by this type, see 'Node::uses()'
112
+ // 'user' is the consumer of the value, 'offset' is the index into
113
+ // 'user's input this where the producers will be found.
114
+ struct Use {
115
+ Use(Node* user, size_t offset) : user(user), offset(offset) {}
116
+ Node* user;
117
+ size_t offset;
118
+
119
+ bool operator==(const Use& b) {
120
+ return user == b.user && offset == b.offset;
121
+ }
122
+ };
123
+
124
+ // Note [User node does not uniquely identify use]
125
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
126
+ // A while back, we wrote some code manipulating uses that looked like this:
127
+ //
128
+ // for (auto& use : used_val->uses_) {
129
+ // if (use.user == this_node) {
130
+ // use.offset += 1;
131
+ // break;
132
+ // }
133
+ // }
134
+ //
135
+ // This code is trying to find a particular use (our node's use) to update it.
136
+ // However, it's wrong: there may be *multiple* uses of a value %x in a node,
137
+ // as might be the case in this IR:
138
+ //
139
+ // %y = Add %x %x
140
+ //
141
+ // In this case, there are two uses of %x whose user is the node 'Add %x %x'.
142
+ // So, "use induced by this node" is not a well-formed concept.
143
+ //
144
+ // If you are looking for "use induced by an input", it's best to use
145
+ // findUseForInput() to get it.
146
+
147
+ // the list types are intentionally simple, but we type-def
148
+ // them here so if we need to change them, refactoring will be easier
149
+ using node_list = std::vector<Node*>;
150
+ using value_list = std::vector<Value*>;
151
+ using use_list = std::vector<Use>;
152
+ template <typename T>
153
+ using ArrayRef = at::ArrayRef<T>;
154
+ using NodeKind = Symbol;
155
+ using topo_position_t = int64_t;
156
+ using ValueSet = std::unordered_set<const Value*>;
157
+
158
+ struct OperatorSet;
159
+ template <typename T>
160
+ struct OperatorMap;
161
+
162
+ // This is a wrapper to allow invalidating the Python object
163
+ // safely when the C++ object for a Node/Value/Block is deleted
164
+ // like much of graph, it isn't safe for different threads to
165
+ // access the same graph
166
+ template <typename T>
167
+ struct Wrap {
168
+ explicit Wrap(T* p) : elem(p), clear_cb(nullptr) {}
169
+ void clear() {
170
+ if (clear_cb) {
171
+ clear_cb(elem);
172
+ }
173
+ elem = nullptr;
174
+ }
175
+ T* elem;
176
+ void (*clear_cb)(void*);
177
+ };
178
+
179
+ struct Value {
180
+ AT_DISALLOW_COPY_AND_ASSIGN(Value);
181
+ Value(Node* node_, size_t offset_);
182
+
183
+ private:
184
+ friend struct Node;
185
+ friend struct Graph;
186
+ Node* node_;
187
+ size_t offset_;
188
+ size_t unique_ = 0; // unique id
189
+ use_list uses_;
190
+ std::string unique_name_;
191
+ TypePtr type_;
192
+ // a managing wrapper for Python to allow invalidation
193
+ std::shared_ptr<Wrap<Value>> wrap_;
194
+
195
+ public:
196
+ Value* setType(TypePtr type);
197
+ TORCH_API void inferTypeFrom(const at::Tensor& output);
198
+ TORCH_API void inferTypeFrom(
199
+ const c10::intrusive_ptr<c10::ivalue::Object>& output);
200
+ const TypePtr& type() const {
201
+ AT_ASSERT(type_ != nullptr);
202
+ return type_;
203
+ }
204
+ bool requires_grad() const {
205
+ return type()->requires_grad();
206
+ }
207
+ bool isCompleteTensor() const {
208
+ if (auto pt = type()->cast<TensorType>()) {
209
+ return pt->isComplete();
210
+ }
211
+ return false;
212
+ }
213
+ TORCH_API bool mustBeNone() const;
214
+ TORCH_API bool mustNotBeNone() const;
215
+ size_t unique() const {
216
+ return unique_;
217
+ }
218
+ bool hasDebugName() const {
219
+ return !unique_name_.empty();
220
+ }
221
+ static bool isValidName(const std::string& name);
222
+ TORCH_API Value* setDebugName(const std::string& name);
223
+ std::string debugName() const {
224
+ if (hasDebugName()) {
225
+ return unique_name_;
226
+ }
227
+ return c10::to_string(unique());
228
+ }
229
+ TORCH_API std::string debugNameBase() const;
230
+ Node* node() {
231
+ return node_;
232
+ }
233
+ size_t offset() const {
234
+ return offset_;
235
+ }
236
+ void setOffset(size_t offset) {
237
+ offset_ = offset;
238
+ }
239
+ const Node* node() const {
240
+ return node_;
241
+ }
242
+
243
+ /**
244
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
245
+ * Check #87343 for details.
246
+ */
247
+ Graph* owningGraph();
248
+ const Graph* owningGraph() const;
249
+ // TODO: make this more const correct
250
+ const use_list& uses() const {
251
+ return uses_;
252
+ }
253
+
254
+ bool hasUses() const {
255
+ return !uses().empty();
256
+ }
257
+
258
+ TORCH_API void replaceFirstUseWith(Value* newValue);
259
+
260
+ // Replaces all uses of this value with 'newValue'.
261
+ //
262
+ // Given: %3 = f(%1, %2)
263
+ // %4 = g(%3)
264
+ // %5 = h(%3, %3)
265
+ // Execute: %3.replaceAllUsesWith(%6)
266
+ // Result: %3 = f(%1, %2)
267
+ // %4 = g(%6)
268
+ // %5 = h(%6, %6)
269
+ TORCH_API void replaceAllUsesWith(Value* newValue);
270
+
271
+ // Replaces all uses of this value with 'newValue' after 'node'.
272
+ // Given: %3 = f(%1, %2)
273
+ // %4 = g(%3)
274
+ // %5 = inplace_(%3)
275
+ // %6 = h(%3, %3)
276
+ // Execute: %3.replaceAllUsesAfterNodeWith(%5.node(), %5)
277
+ // Result: %3 = f(%1, %2)
278
+ // %4 = g(%3)
279
+ // %5 = inplace_(%3)
280
+ // %6 = h(%5, %5)
281
+ // XXX: does not check scoping legality, consider using
282
+ // replaceAllUsesDominatedByNodeWith
283
+ TORCH_API void replaceAllUsesAfterNodeWith(const Node* node, Value* newValue);
284
+
285
+ // Replaces all uses of this value with 'newValue' that are dominated by
286
+ // 'node'. Given:
287
+ // x = op(...).
288
+ // if cond:
289
+ // z = foo(..)
290
+ // bar(x)
291
+ // else:
292
+ // print(x)
293
+ // x.replaceAllUsesDominatedByNodeWith(foo, z) would replace bar(x)
294
+ // but not print(x) because print is not dominated by foo.
295
+ // replaceAllUsesAfterNode does not check domination, so in this example
296
+ // it would produce invalid IR.
297
+ TORCH_API void replaceAllUsesDominatedByNodeWith(
298
+ const Node* node,
299
+ Value* newValue);
300
+
301
+ TORCH_API Value* copyMetadata(Value* from);
302
+
303
+ TORCH_API std::shared_ptr<Wrap<Value>> wrap() {
304
+ if (!wrap_) {
305
+ wrap_ = std::make_shared<Wrap<Value>>(this);
306
+ }
307
+ return wrap_;
308
+ }
309
+
310
+ virtual ~Value() {
311
+ if (wrap_) {
312
+ wrap_->clear();
313
+ }
314
+ }
315
+ };
316
+
317
+ struct TORCH_API Node {
318
+ AT_DISALLOW_COPY_AND_ASSIGN(Node);
319
+ friend struct Graph;
320
+ friend struct Block;
321
+ friend struct Value;
322
+ friend graph_node_list;
323
+ friend const_graph_node_list;
324
+ friend graph_node_list_iterator;
325
+ friend const_graph_node_list_iterator;
326
+
327
+ private:
328
+ const NodeKind kind_;
329
+ std::vector<Value*> inputs_;
330
+ std::vector<Value*> outputs_;
331
+ // subblocks
332
+ std::vector<Block*> blocks_;
333
+ Graph* graph_;
334
+ Block* owning_block_;
335
+ c10::optional<SourceRange> source_range_;
336
+ ScopePtr scope_;
337
+ c10::optional<InlinedCallStackPtr> callstack_;
338
+ // Assumes FunctionSchemas are persistent, so we don't manage their lifetime.
339
+ // This field is effective a cache that's populated on attribute lookups and
340
+ // invalidated every time we perform an operation that could potentially
341
+ // change the schema. note: mutable because schema_ is effectively a cache
342
+ mutable const Operator* op_;
343
+ topo_position_t topo_position_ = 0;
344
+ // a managing wrapper for Python to allow invalidation
345
+ std::shared_ptr<Wrap<Node>> wrap_;
346
+ // Stores the full schema name, if the operator is historic
347
+ // When the operator is deprecated or the name of the operator
348
+ // is changed, we need to rely on this name
349
+ // to retrieve old schemas to successfully apply upgraders
350
+ // for this operator.
351
+ c10::optional<std::string> historic_schema_name_ = c10::nullopt;
352
+
353
+ protected:
354
+ Node(Graph* graph_, NodeKind kind_); // defined after graph
355
+ public:
356
+ // Each Node but Return/Param Nodes are associated with exactly one
357
+ // place in the Node list of the Graph. The Graph itself is a circular
358
+ // doubly-linked list. The Return Node is used as the sentinel for the
359
+ // "beginning"/"end" of the list. This means that you can tell when
360
+ // you've traversed the entire list without means worrying about null
361
+ // pointers. `next_in_graph[0]` is the pointer to the next Node, while
362
+ // `next_in_graph[1]` is the pointer to the previous Node. The
363
+ // linked list is implemented as an array to allow the same iterator
364
+ // class for forward and reversed Node lists. Taken together, this
365
+ // list also represents a topological sort of the Nodes in the Graph.
366
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-non-private-member-variables-in-classes,modernize-avoid-c-arrays)
367
+ Node* next_in_graph[2] = {nullptr, nullptr};
368
+
369
+ std::shared_ptr<Wrap<Node>> wrap() {
370
+ if (!wrap_) {
371
+ wrap_ = std::make_shared<Wrap<Node>>(this);
372
+ }
373
+ return wrap_;
374
+ }
375
+
376
+ const c10::optional<std::string> getHistoricSchemaName() {
377
+ return historic_schema_name_;
378
+ }
379
+
380
+ void setHistoricSchemaName(const std::string& name) {
381
+ historic_schema_name_ = name;
382
+ }
383
+
384
+ Node*& next() {
385
+ return next_in_graph[kNextDirection];
386
+ }
387
+ Node*& prev() {
388
+ return next_in_graph[kPrevDirection];
389
+ }
390
+ Node* const& next() const {
391
+ return next_in_graph[kNextDirection];
392
+ }
393
+ Node* const& prev() const {
394
+ return next_in_graph[kPrevDirection];
395
+ }
396
+
397
+ NodeKind kind() const {
398
+ return kind_;
399
+ }
400
+ Node* setSourceRange(SourceRange r) {
401
+ source_range_ = std::move(r);
402
+ return this;
403
+ }
404
+ SourceRange sourceRange() const;
405
+
406
+ /**
407
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
408
+ * Check #87343 for details.
409
+ */
410
+ Graph* owningGraph() {
411
+ return graph_;
412
+ }
413
+ const Graph* owningGraph() const {
414
+ return graph_;
415
+ }
416
+ Block* owningBlock() {
417
+ return owning_block_;
418
+ }
419
+ const Block* owningBlock() const {
420
+ return owning_block_;
421
+ }
422
+ ScopePtr scope() {
423
+ return scope_;
424
+ }
425
+ void setScope(ScopePtr scope) {
426
+ scope_ = std::move(scope);
427
+ }
428
+ std::string scopeName() const {
429
+ if (!scope_) {
430
+ return "";
431
+ }
432
+ return scope_->namesFromRoot();
433
+ }
434
+
435
+ // Copies the source range, scope and callstack from another node.
436
+ Node* copyMetadata(Node* from) {
437
+ this->setSourceRange(from->sourceRange());
438
+ this->setScope(from->scope());
439
+ if (auto cs = from->callstack()) {
440
+ this->setCallStack(*cs);
441
+ }
442
+ return this;
443
+ }
444
+
445
+ c10::optional<InlinedCallStackPtr> callstack() const {
446
+ return callstack_;
447
+ }
448
+ void setCallStack(InlinedCallStackPtr cs) {
449
+ callstack_ = std::move(cs);
450
+ }
451
+
452
+ // NB: This returns an ArrayRef; that means that it will
453
+ // get invalidated if you resize inputs (e.g., using addInput)
454
+ // We can't return a std::vector<Node*>& because there's no
455
+ // way to soundly cast to std::vector<const Node*> (an insane
456
+ // implementation of std::vector could make this representationally
457
+ // different.)
458
+ at::ArrayRef<Value*> inputs() {
459
+ return inputs_;
460
+ }
461
+ at::ArrayRef<const Value*> inputs() const {
462
+ // Vectors are not convertible in const-ness of elements, but
463
+ // raw pointers are.
464
+ return {inputs_.data(), inputs_.size()};
465
+ }
466
+ // NB: This returns an ArrayRef; that means that it will
467
+ // get invalidated if you resize inputs (e.g., using addInput)
468
+ // We can't return a std::vector<Node*>& because there's no
469
+ // way to soundly cast to std::vector<const Node*> (an insane
470
+ // implementation of std::vector could make this representationally
471
+ // different.)
472
+ at::ArrayRef<Value*> outputs() {
473
+ return outputs_;
474
+ }
475
+ at::ArrayRef<const Value*> outputs() const {
476
+ // Vectors are not convertible in const-ness of elements, but
477
+ // raw pointers are.
478
+ return {outputs_.data(), outputs_.size()};
479
+ }
480
+ Value* output(size_t i) const {
481
+ return outputs_.at(i);
482
+ }
483
+ bool hasUses() const {
484
+ for (auto o : outputs()) {
485
+ if (!o->uses().empty()) {
486
+ return true;
487
+ }
488
+ }
489
+ return false;
490
+ }
491
+
492
+ void replaceAllUsesWith(Node* n);
493
+
494
+ // replaces `this` with a new node with the same inputs and outputs
495
+ // but a new node symbol. does not destroy `this`
496
+ Node* replaceWithNewSymbol(Symbol new_symbol);
497
+
498
+ // Checks if this node is dominated by `dominator` which means that
499
+ // `dominator` will always be executed before `this` and `dominator`
500
+ // is in scope of `this.
501
+ bool isDominatedBy(const Node* dominator) const;
502
+
503
+ // lots of things like chunk have a single input or single output, so we have
504
+ // a helper to make accessing it easier
505
+ Value* input() {
506
+ AT_ASSERT(inputs_.size() == 1);
507
+ return inputs_.at(0);
508
+ }
509
+ Value* output() {
510
+ AT_ASSERT(outputs_.size() == 1);
511
+ return outputs_.at(0);
512
+ }
513
+ const Value* output() const {
514
+ AT_ASSERT(outputs_.size() == 1);
515
+ return outputs_.at(0);
516
+ }
517
+ const Value* input() const {
518
+ AT_ASSERT(inputs_.size() == 1);
519
+ return inputs_.at(0);
520
+ }
521
+ // Access a particular input. This is a checked index.
522
+ Value* input(size_t i) const {
523
+ return inputs_.at(i);
524
+ }
525
+
526
+ bool hasNamedInput(const std::string& unqualName) const;
527
+ Value* namedInput(const std::string& unqualName) const;
528
+ Value* namedInput(Symbol name) const;
529
+
530
+ c10::optional<IValue> get(Symbol name) const;
531
+
532
+ template <typename T>
533
+ c10::optional<T> get(Symbol name) const {
534
+ if (auto v = get(name)) {
535
+ return v->template to<T>();
536
+ }
537
+ return c10::nullopt;
538
+ }
539
+
540
+ // Returns true if the value of input name is statically known
541
+ bool is_constant(Symbol name) const {
542
+ return static_cast<bool>(get(name));
543
+ }
544
+ bool mustBeNone() const;
545
+
546
+ bool isNondeterministic() const;
547
+ bool hasSideEffects() const;
548
+
549
+ // instructions lowered by the interpreter and not run in the optimized graph
550
+ bool notExecutedOp() const {
551
+ return kind_ == prim::Constant || kind_ == prim::profile ||
552
+ kind_ == prim::profile_ivalue;
553
+ }
554
+
555
+ // Graphs
556
+
557
+ // Note [Topological invariant]
558
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
559
+ // We always maintain an up-to-date topological ordering of all nodes via
560
+ // the next()/prev() links. All transformations to graphs must preserve
561
+ // this topological ordering: for example, it is only valid to 'addInput'
562
+ // with an input which is topologically before the current node.
563
+ //
564
+ // Usually, it is obvious whether or not topological order is maintained;
565
+ // for example, if you are adding nodes to the end of the topsort, it's
566
+ // impossible for them to refer to inputs that are not in the topsort.
567
+ // If it is not obvious, please comment accordingly.
568
+
569
+ // Add 'node' as an input to 'this' at the end of existing
570
+ // arguments. Returns the added node for ease of chaining.
571
+ //
572
+ // Given: %3 = f(%1, %2)
573
+ // Execute: %3.addInput(%4)
574
+ // Result: %3 = f(%1, %2, %4)
575
+ Value* addInput(Value* value);
576
+
577
+ // Add 'value' as an input to 'this' at the specified position in the
578
+ // arguments. Returns the added value for ease of chaining.
579
+ Value* insertInput(size_t i, Value* value);
580
+
581
+ // Replace the input of 'this' at position 'i' with
582
+ // 'newValue', returning the old node.
583
+ //
584
+ // Given: %3 = f(%1, %2)
585
+ // Execute: %3.replaceInput(1, %4)
586
+ // Result: %3 = f(%1, %4)
587
+ Value* replaceInput(size_t i, Value* newValue);
588
+
589
+ // Replace all occurrences of 'from' in the inputs of this
590
+ // node with 'to'. Corresponds to llvm's replaceUsesOfWith.
591
+ //
592
+ // Given: %3 = f(%1, %2, %1)
593
+ // Execute: %3.replaceInputWith(%1, %4)
594
+ // Result: %3 = f(%4, %2, %4)
595
+ void replaceInputWith(Value* from, Value* to);
596
+
597
+ Value* addOutput();
598
+
599
+ Value* insertOutput(size_t i);
600
+
601
+ void eraseOutput(size_t i);
602
+
603
+ Block* addBlock();
604
+ void eraseBlock(size_t i);
605
+
606
+ // Each Node can have a list of subblocks. These are used to define structured
607
+ // nested control flow operators such as If and Loop.
608
+ // The meaning of a block is specific to the kind of node it is in, but
609
+ // all blocks share these semantics:
610
+ // * Nested lexical scoping: If a node 'Parent' has a subblock which contains
611
+ // a node 'Child', Child can use any value that was in scope for the Parent
612
+ // node in addition to any values defined before 'Child' in the subblock.
613
+ // * The list of inputs to the block are in scope for the duration of the
614
+ // block
615
+ // * the outputs of the Parent node are not in scope for the subblocks
616
+ // Typically the inputs to a block that represents control flow act as
617
+ // as the equivalents phi-nodes in standard SSA form,
618
+ // defining a new Value to represent any term that has multiple
619
+ // definitions depending on how control flowed. Outputs of the node containing
620
+ // control flow serve a similiar purpose defining new values for variables
621
+ // that would have different definitions depending on which way control
622
+ // flowed.
623
+
624
+ at::ArrayRef<Block*> blocks() {
625
+ return blocks_;
626
+ }
627
+ at::ArrayRef<const Block*> blocks() const {
628
+ // Vectors are not convertible in const-ness of elements, but
629
+ // raw pointers are.
630
+ return {blocks_.data(), blocks_.size()};
631
+ }
632
+
633
+ // Is 'this' before 'n' in the topological order?
634
+ bool isBefore(const Node* n) const;
635
+
636
+ // Is 'this' after 'n' in the topological order?
637
+ bool isAfter(const Node* n) const;
638
+
639
+ // Insert unattached 'this' node before 'n' in the topological order.
640
+ // Returns this (for chaining).
641
+ //
642
+ // Given: %3 = f(%1, %2)
643
+ // %4 = g(%3)
644
+ // and unattached: %5 = h(%1)
645
+ // Execute: %5.insertBefore(%4)
646
+ // Result: %3 = f(%1, %2)
647
+ // %5 = h(%1)
648
+ // %4 = g(%3)
649
+ Node* insertBefore(Node* n);
650
+
651
+ // Insert unattached 'this' node after 'n' in the topological order.
652
+ // Returns this (for chaining).
653
+ //
654
+ // Given: %3 = f(%1, %2)
655
+ // %4 = g(%3)
656
+ // and unattached: %5 = h(%1)
657
+ // Execute: %5.insertAfter(%4)
658
+ // Result: %3 = f(%1, %2)
659
+ // %4 = g(%3)
660
+ // %5 = h(%1)
661
+ Node* insertAfter(Node* n);
662
+
663
+ // Move 'this' (already in the graph) after 'n' in the topological order.
664
+ //
665
+ // NOTE: Does not check that value dependencies are preserved, see
666
+ // AliasDb::moveAfterTopologicallyValid
667
+ //
668
+ // Given: %2 = f(%1)
669
+ // %3 = g(%1)
670
+ // Execute: %2.moveAfter(%3)
671
+ // Result: %3 = g(%1)
672
+ // %2 = f(%1)
673
+ //
674
+ void moveAfter(Node* n);
675
+
676
+ // Move a node 'n' (already in the graph) before 'this' in the topological
677
+ // order.
678
+ //
679
+ // NOTE: Does not check that value dependencies are preserved, see
680
+ // AliasDb::moveBeforeTopologicallyValid
681
+ //
682
+ // Given: %2 = f(%1)
683
+ // %3 = g(%1)
684
+ // Execute: %3.moveBefore(%2)
685
+ // Result: %3 = g(%1)
686
+ // %2 = f(%1)
687
+ void moveBefore(Node* n);
688
+
689
+ // Remove the input at 'i' from this node.
690
+ //
691
+ // WARNING: This is O(n) in the number of inputs, so avoid repeatedly calling
692
+ // removeInput.
693
+ //
694
+ // Given: %3 = f(%1, %2)
695
+ // Execute: %3.removeInput(1)
696
+ // Result: %3 = f(%1)
697
+ void removeInput(size_t i);
698
+
699
+ // Remove all inputs from a node.
700
+ //
701
+ // Given: %3 = f(%1, %2)
702
+ // Execute: %3.removeAllInputs()
703
+ // Result: %3 = f()
704
+ void removeAllInputs();
705
+
706
+ // Remove all outputs from a node.
707
+ //
708
+ // Given: %1, %2 = f()
709
+ // Execute:removeAllInputs()
710
+ // Result: = f()
711
+ void removeAllOutputs();
712
+
713
+ // Rearrange the ordering of inputs or outputs of a node
714
+ // Given: %3 = f(%1, %2)
715
+ // Execute: %3.permuteInputs({1, 0})
716
+ // Result: %3 = f(%2, %1)
717
+ // Each index must appear exactly once
718
+ void permuteInputs(const std::vector<size_t>& new_inputs);
719
+ void permuteOutputs(const std::vector<size_t>& new_inputs);
720
+
721
+ // iterators of the node list starting at this node
722
+ // useful for resuming a search starting at this node
723
+ inline graph_node_list_iterator iterator() {
724
+ return {this, 0};
725
+ }
726
+ inline graph_node_list_iterator reverseIterator() {
727
+ return iterator().reverse();
728
+ }
729
+ inline const_graph_node_list_iterator iterator() const {
730
+ return {this, 0};
731
+ }
732
+ inline const_graph_node_list_iterator reverseIterator() const {
733
+ return iterator().reverse();
734
+ }
735
+
736
+ // Remove 'this' from the instruction list and deallocate it.
737
+ //
738
+ // Invariant: no outputs of 'this' may have any uses.
739
+ //
740
+ // Given: %2 = f(%1)
741
+ // %3 = g(%1)
742
+ // Execute: %2.destroy()
743
+ // Result: %3 = g(%1)
744
+ void destroy();
745
+
746
+ // Dynamically cast this node to the subclass indicated by the
747
+ // template variable, returning nullptr if the cast is invalid..
748
+ //
749
+ // Example usage: if(auto s = n.cast<Select>()) { ... }
750
+ template <typename T>
751
+ T* cast() {
752
+ if (T::Kind == kind()) {
753
+ return static_cast<T*>(this);
754
+ }
755
+ return nullptr;
756
+ }
757
+ template <typename T>
758
+ const T* cast() const {
759
+ if (T::Kind == kind()) {
760
+ return static_cast<const T*>(this);
761
+ }
762
+ return nullptr;
763
+ }
764
+
765
+ template <typename T>
766
+ T* expect() {
767
+ TORCH_CHECK(
768
+ T::Kind == kind(),
769
+ "expected a ",
770
+ T::Kind.toDisplayString(),
771
+ " but found a ",
772
+ kind().toDisplayString());
773
+ return static_cast<T*>(this);
774
+ }
775
+
776
+ bool matches(const FunctionSchema& schema) const;
777
+
778
+ // XXX: this function is meant to be used with string literals only!
779
+ bool matches(
780
+ const char* signature_literal,
781
+ at::ArrayRef<Symbol> const_inputs = {}) const;
782
+
783
+ bool isMemberOf(const OperatorSet& os) const;
784
+ template <typename T>
785
+ bool isMemberOf(const OperatorMap<T>& om) const {
786
+ auto it = om.map.find(kind());
787
+ if (it == om.map.end()) {
788
+ return false;
789
+ }
790
+ for (auto& op : it->second) {
791
+ if (matches(op.first->schema())) {
792
+ return true;
793
+ }
794
+ }
795
+ return false;
796
+ }
797
+
798
+ const FunctionSchema& schema() const;
799
+ const FunctionSchema* maybeSchema() const;
800
+ const Operator& getOperator() const;
801
+ Operation getOperation() const;
802
+
803
+ const Operator* maybeOperator() const;
804
+
805
+ void dump() const;
806
+
807
+ std::ostream& print(
808
+ std::ostream& out,
809
+ size_t level,
810
+ std::vector<const Node*>* groups,
811
+ bool print_source_locations = true,
812
+ bool print_attributes = true,
813
+ bool print_scopes = true,
814
+ bool print_body = true) const;
815
+
816
+ virtual ~Node() {
817
+ if (wrap_) {
818
+ wrap_->clear();
819
+ }
820
+ }
821
+
822
+ // Methods for accessing attributes
823
+ Node* copyAttributes(const Node& rhs) {
824
+ values_.clear();
825
+ for (const AVPtr& i : rhs.values_) {
826
+ values_.push_back(i->clone());
827
+ }
828
+ return this;
829
+ }
830
+ bool hasAttribute(Symbol name) const {
831
+ AT_ASSERT(name.is_attr());
832
+ return findAttr(name, false) != values_.end();
833
+ }
834
+ bool hasAttributeS(const std::string& name) const {
835
+ return hasAttribute(Symbol::attr(name));
836
+ }
837
+ AttributeKind kindOf(Symbol name) const {
838
+ AT_ASSERT(name.is_attr());
839
+ return (*findAttr(name, true))->kind();
840
+ }
841
+ AttributeKind kindOfS(const std::string& name) const {
842
+ return kindOf(Symbol::attr(name));
843
+ }
844
+ Node* removeAttribute(Symbol name) {
845
+ AT_ASSERT(name.is_attr());
846
+ values_.erase(findAttr(name, true));
847
+ return this;
848
+ }
849
+ Node* removeAttributeS(const std::string& name) {
850
+ return removeAttribute(Symbol::attr(name));
851
+ }
852
+ bool hasAttributes() const {
853
+ return !values_.empty();
854
+ }
855
+ size_t numAttributes() const {
856
+ return values_.size();
857
+ }
858
+ // The names are returned in order, since name actually is the index.
859
+ std::vector<Symbol> attributeNames() const {
860
+ std::vector<Symbol> names;
861
+ names.reserve(values_.size());
862
+ for (const AVPtr& a : values_) {
863
+ names.push_back(a->name);
864
+ }
865
+ return names;
866
+ }
867
+ std::vector<const char*> attributeNamesS() const {
868
+ std::vector<const char*> names;
869
+ names.reserve(values_.size());
870
+ for (const AVPtr& a : values_) {
871
+ names.push_back(a->name.toUnqualString());
872
+ }
873
+ return names;
874
+ }
875
+
876
+ #define CREATE_ACCESSOR(Kind, method) \
877
+ Node* method##_(Symbol name, Kind##Attr::ConstructorType v) { \
878
+ return setAttr<Kind##Attr>( \
879
+ name, std::forward<Kind##Attr::ConstructorType>(v)); \
880
+ } \
881
+ const Kind##Attr::ValueType& method(Symbol name) const { \
882
+ return getAttr<Kind##Attr>(name); \
883
+ }
884
+
885
+ CREATE_ACCESSOR(Float, f)
886
+ CREATE_ACCESSOR(Complex, c)
887
+ CREATE_ACCESSOR(Floats, fs)
888
+ CREATE_ACCESSOR(ComplexVals, cs)
889
+ CREATE_ACCESSOR(String, s)
890
+ CREATE_ACCESSOR(Strings, ss)
891
+ CREATE_ACCESSOR(Int, i)
892
+ CREATE_ACCESSOR(Ints, is)
893
+ CREATE_ACCESSOR(Graph, g)
894
+ CREATE_ACCESSOR(Graphs, gs)
895
+ CREATE_ACCESSOR(Type, ty)
896
+ CREATE_ACCESSOR(Types, tys)
897
+ CREATE_ACCESSOR(IValue, ival)
898
+
899
+ #undef CREATE_ACCESSOR
900
+
901
+ // Our Graphs are not very const-correct, so we need to allow returning
902
+ // non-const references too
903
+ GraphAttr::ValueType& g(Symbol name) {
904
+ return getAttr<GraphAttr>(name);
905
+ }
906
+
907
+ // does not use CREATE_ACCESSOR because we need additional asserts
908
+ Node* t_(Symbol name, TensorAttr::ConstructorType v) {
909
+ return setAttr<TensorAttr>(
910
+ name, std::forward<TensorAttr::ConstructorType>(v));
911
+ }
912
+ const TensorAttr::ValueType& t(Symbol name) const {
913
+ return getAttr<TensorAttr>(name);
914
+ }
915
+
916
+ Node* ts_(Symbol name, TensorsAttr::ConstructorType v) {
917
+ return setAttr<TensorsAttr>(
918
+ name, std::forward<TensorsAttr::ConstructorType>(v));
919
+ }
920
+ const TensorsAttr::ValueType& ts(Symbol name) const {
921
+ return getAttr<TensorsAttr>(name);
922
+ }
923
+
924
+ Block* findCommonAncestorBlockWith(Node* n);
925
+
926
+ size_t blocksFromGraphBlock();
927
+
928
+ private:
929
+ void printAttrValue(std::ostream& out, const Symbol& name) const;
930
+ void printAttributes(std::ostream& out, bool ignore_subgraph) const;
931
+
932
+ template <typename T>
933
+ Node* setAttr(Symbol name, typename T::ConstructorType v) {
934
+ AT_ASSERT(name.is_attr());
935
+ auto it = findAttr(name, false);
936
+ auto nv = AVPtr(new T(name, std::forward<typename T::ConstructorType>(v)));
937
+ // NOLINTNEXTLINE(bugprone-branch-clone)
938
+ if (it == values_.end()) {
939
+ values_.push_back(std::move(nv));
940
+ } else {
941
+ *it = std::move(nv);
942
+ }
943
+ return this;
944
+ }
945
+ template <typename T>
946
+ typename T::ValueType& getAttr(Symbol name) const {
947
+ AT_ASSERT(name.is_attr());
948
+ auto it = findAttr(name, true);
949
+ auto* child = dynamic_cast<T*>(it->get());
950
+ if (child == nullptr) {
951
+ throw IRAttributeError(name, true);
952
+ }
953
+ return child->value();
954
+ }
955
+ using AVPtr = AttributeValue::Ptr;
956
+ // NB: For determinism, we use a vector rather than a hash map. This does
957
+ // mean that lookups are O(n), so you shouldn't use Attributes to store
958
+ // a big pile of messages.
959
+ std::vector<AVPtr> values_;
960
+ std::vector<AVPtr>::iterator findAttr(Symbol name, bool required) {
961
+ AT_ASSERT(name.is_attr());
962
+ auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) {
963
+ return v->name == name;
964
+ });
965
+ if (required && it == values_.end()) {
966
+ throw IRAttributeError(name, false);
967
+ }
968
+ AT_ASSERT(!required || it != values_.end());
969
+ return it;
970
+ }
971
+ std::vector<AVPtr>::const_iterator findAttr(Symbol name, bool required)
972
+ const {
973
+ AT_ASSERT(name.is_attr());
974
+ auto it = std::find_if(values_.begin(), values_.end(), [&](const AVPtr& v) {
975
+ return v->name == name;
976
+ });
977
+ if (required && it == values_.end()) {
978
+ throw IRAttributeError(name, false);
979
+ }
980
+ AT_ASSERT(!required || it != values_.end());
981
+ return it;
982
+ }
983
+
984
+ enum class MoveSide { BEFORE, AFTER };
985
+ bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const;
986
+
987
+ std::pair<Value*, const Argument&> findInput(Symbol name);
988
+ // Lookup iterator in use list of _input i_ that corresponds to its use of
989
+ // _this_
990
+ use_list::iterator findUseForInput(size_t i);
991
+
992
+ // remove the use of input i, this sets input i to nullptr, but
993
+ // is only used internally to Node before setting it to a new value
994
+ // or erasing the entry from the list.
995
+ Value* dropInput(size_t i);
996
+
997
+ bool inBlockList() const {
998
+ if (next() == nullptr) {
999
+ AT_ASSERT(prev() == nullptr);
1000
+ }
1001
+ return next() != nullptr;
1002
+ }
1003
+
1004
+ void removeFromList();
1005
+ void lint() const;
1006
+
1007
+ void assignTopoPosition();
1008
+
1009
+ protected:
1010
+ // subclasses must override
1011
+ // this function is used by createClone to initialize a new version
1012
+ // of a node in another graph. It should allocate a new instance of the same
1013
+ // concrete type as 'this', but in graph 'g' which might be different
1014
+ // than graph_
1015
+ virtual Node* allocNewInstance(Graph* g) {
1016
+ return new Node(g, kind());
1017
+ }
1018
+ // create a copy of all properties of Node s into this.
1019
+ // subclasses should extend if they have additional information to copy.
1020
+ // 'this' will be allocated with s->allocNewInstance(g) so it should have
1021
+ // the same concrete type as 's'
1022
+ virtual void cloneFrom(Node* s);
1023
+ };
1024
+
1025
+ struct Block {
1026
+ friend struct Node;
1027
+ friend struct Graph;
1028
+
1029
+ AT_DISALLOW_COPY_AND_ASSIGN(Block);
1030
+ TORCH_API Block(Graph* graph_, Node* node_);
1031
+
1032
+ at::ArrayRef<Value*> inputs() {
1033
+ return input_->outputs();
1034
+ }
1035
+ at::ArrayRef<const Value*> inputs() const {
1036
+ const auto& inputs = input_->outputs();
1037
+ return {inputs.data(), inputs.size()};
1038
+ }
1039
+ at::ArrayRef<Value*> outputs() {
1040
+ return output_->inputs();
1041
+ }
1042
+ at::ArrayRef<const Value*> outputs() const {
1043
+ return static_cast<const Node*>(output_)->inputs();
1044
+ }
1045
+ graph_node_list nodes() {
1046
+ return {input_, kNextDirection};
1047
+ }
1048
+ const_graph_node_list nodes() const {
1049
+ return {input_, kNextDirection};
1050
+ }
1051
+ Node* return_node() {
1052
+ return output_;
1053
+ }
1054
+ const Node* return_node() const {
1055
+ return output_;
1056
+ }
1057
+ Node* param_node() {
1058
+ return input_;
1059
+ }
1060
+ const Node* param_node() const {
1061
+ return input_;
1062
+ }
1063
+ /**
1064
+ * @warning NEVER pass raw pointer of smart pointer managed Graph to Python.
1065
+ * Check #87343 for details.
1066
+ */
1067
+ Graph* owningGraph() {
1068
+ return graph_;
1069
+ }
1070
+ const Graph* owningGraph() const {
1071
+ return graph_;
1072
+ }
1073
+ Node* owningNode() {
1074
+ return owning_node_;
1075
+ }
1076
+ const Node* owningNode() const {
1077
+ return owning_node_;
1078
+ }
1079
+
1080
+ Value* addInput(const std::string& name = "") {
1081
+ Value* v = input_->addOutput();
1082
+ v->setDebugName(name);
1083
+ return v;
1084
+ }
1085
+ Value* insertInput(size_t i, const std::string& name = "") {
1086
+ Value* v = input_->insertOutput(i);
1087
+ v->setDebugName(name);
1088
+ return v;
1089
+ }
1090
+ void eraseInput(size_t i) {
1091
+ input_->eraseOutput(i);
1092
+ }
1093
+ void removeAllInputs() {
1094
+ input_->removeAllOutputs();
1095
+ }
1096
+ size_t registerOutput(Value* v) {
1097
+ output_->addInput(v);
1098
+ return outputs().size() - 1;
1099
+ }
1100
+ size_t insertOutput(size_t i, Value* n) {
1101
+ output_->insertInput(i, n);
1102
+ return i;
1103
+ }
1104
+ void eraseOutput(size_t i) {
1105
+ output_->removeInput(i);
1106
+ }
1107
+ void removeAllOutputs() {
1108
+ output_->removeAllInputs();
1109
+ }
1110
+
1111
+ void replaceOutput(size_t i, Value* n) {
1112
+ output_->replaceInput(i, n);
1113
+ }
1114
+ void permuteOutputs(const std::vector<size_t>& new_inputs) {
1115
+ output_->permuteInputs(new_inputs);
1116
+ }
1117
+ void permuteInputs(const std::vector<size_t>& new_inputs) {
1118
+ input_->permuteOutputs(new_inputs);
1119
+ }
1120
+
1121
+ Node* appendNode(Node* n) {
1122
+ AT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
1123
+ n->insertBefore(output_);
1124
+ return n;
1125
+ }
1126
+ Node* prependNode(Node* n) {
1127
+ AT_ASSERT(n->graph_ == graph_ && !n->inBlockList());
1128
+ n->insertAfter(input_);
1129
+ return n;
1130
+ }
1131
+
1132
+ // clone all inputs, nodes, and outputs from src and append them
1133
+ // to the inputs, nodes, and outputs of this block
1134
+ // value_map is used whenever a node in src references a free variable
1135
+ // in src to look up its corresponding value
1136
+ TORCH_API void cloneFrom(Block* src, std::function<Value*(Value*)> value_map);
1137
+ TORCH_API void remapTypes(const std::function<TypePtr(TypePtr)>& type_map);
1138
+
1139
+ TORCH_API std::shared_ptr<Wrap<Block>> wrap() {
1140
+ if (!wrap_) {
1141
+ wrap_ = std::make_shared<Wrap<Block>>(this);
1142
+ }
1143
+ return wrap_;
1144
+ }
1145
+
1146
+ virtual ~Block() {
1147
+ if (wrap_) {
1148
+ wrap_->clear();
1149
+ }
1150
+ }
1151
+
1152
+ void clear() {
1153
+ removeAllOutputs();
1154
+ for (auto it = nodes().rbegin(); it != nodes().rend(); it++) {
1155
+ it.destroyCurrent();
1156
+ }
1157
+ removeAllInputs();
1158
+ }
1159
+
1160
+ private:
1161
+ void reIndexTopology();
1162
+
1163
+ // get rid of all nodes
1164
+ // destroys in reverse order so that uses internal to this block
1165
+ // do not have to be removed before you can destroy the block
1166
+ void destroy();
1167
+
1168
+ Graph* const graph_;
1169
+ // holds outputs in a way that can be reflected
1170
+ // as a Use object
1171
+ // also used as the beginning/end of the circular node list to avoid
1172
+ // having corner cases where the list is empty.
1173
+ Node* const output_;
1174
+ Node* const input_;
1175
+ Node* const
1176
+ owning_node_; // either the node that has this block or nullptr for root
1177
+ // a managing wrapper for Python to allow invalidation
1178
+ std::shared_ptr<Wrap<Block>> wrap_;
1179
+ };
1180
+
1181
+ struct Graph : std::enable_shared_from_this<Graph> {
1182
+ AT_DISALLOW_COPY_AND_ASSIGN(Graph);
1183
+ friend struct Node;
1184
+ friend struct Value;
1185
+ friend struct Block;
1186
+
1187
+ private:
1188
+ // only used to keep track of allocated nodes
1189
+ // actual representation of Graph is done with
1190
+ // inputs, outputs, nodes
1191
+
1192
+ std::unordered_set<const Node*> all_nodes;
1193
+ std::unordered_set<const Value*> all_values;
1194
+ std::unordered_set<const Block*> all_blocks;
1195
+ size_t next_unique_;
1196
+
1197
+ std::unordered_map<std::string, Value*> unique_names_;
1198
+ // name_base_suffix tracks largest suffix currently used by all names sharing
1199
+ // same name_base. Key of this map is name_base, value is largest suffix
1200
+ // numeric value.
1201
+ std::unordered_map<std::string, size_t> name_base_suffix_;
1202
+
1203
+ ScopePtr current_scope_;
1204
+
1205
+ Block* const block_;
1206
+ // when insertNode() is called, the node is inserted before this node
1207
+ // by default this is set to append to the top level block
1208
+ Node* insert_before_;
1209
+ int64_t predicted_insert_count_ = 0;
1210
+
1211
+ c10::optional<size_t> op_version_;
1212
+
1213
+ public:
1214
+ Graph(ScopePtr scope_root = c10::make_intrusive<Scope>())
1215
+ : next_unique_(0),
1216
+ current_scope_(std::move(scope_root)),
1217
+ block_(new Block(this, nullptr)),
1218
+ insert_before_(return_node()) {}
1219
+
1220
+ at::ArrayRef<Value*> inputs() {
1221
+ return block_->inputs();
1222
+ }
1223
+ at::ArrayRef<const Value*> inputs() const {
1224
+ const Block& block = *block_;
1225
+ return block.inputs();
1226
+ }
1227
+ at::ArrayRef<Value*> outputs() {
1228
+ return block_->outputs();
1229
+ }
1230
+ at::ArrayRef<const Value*> outputs() const {
1231
+ const Block& block = *block_;
1232
+ return block.outputs();
1233
+ }
1234
+ graph_node_list nodes() {
1235
+ return block_->nodes();
1236
+ }
1237
+ const_graph_node_list nodes() const {
1238
+ const Block& block = *block_;
1239
+ return block.nodes();
1240
+ }
1241
+ Node* param_node() {
1242
+ return block_->param_node();
1243
+ }
1244
+ const Node* param_node() const {
1245
+ return block_->param_node();
1246
+ }
1247
+ Node* return_node() {
1248
+ return block_->return_node();
1249
+ }
1250
+ const Node* return_node() const {
1251
+ return block_->return_node();
1252
+ }
1253
+ const std::unordered_map<std::string, Value*>& debugNames() const {
1254
+ return unique_names_;
1255
+ }
1256
+
1257
+ TORCH_API void push_scope(const std::string& scope_name);
1258
+ TORCH_API void pop_scope();
1259
+
1260
+ ScopePtr current_scope() {
1261
+ return current_scope_;
1262
+ }
1263
+
1264
+ void set_op_version(c10::optional<size_t> version) {
1265
+ op_version_ = version;
1266
+ }
1267
+
1268
+ c10::optional<size_t> get_op_version() {
1269
+ return op_version_;
1270
+ }
1271
+
1272
+ void set_current_scope(ScopePtr scope) {
1273
+ current_scope_ = std::move(scope);
1274
+ }
1275
+
1276
+ Value* addInput(const std::string& name = "") {
1277
+ return block_->addInput(name);
1278
+ }
1279
+ Value* insertInput(size_t i, const std::string& name = "") {
1280
+ return block_->insertInput(i, name);
1281
+ }
1282
+ void eraseInput(size_t i) {
1283
+ block_->eraseInput(i);
1284
+ }
1285
+ size_t registerOutput(Value* n) {
1286
+ return block_->registerOutput(n);
1287
+ }
1288
+ void eraseOutput(size_t i) {
1289
+ block_->eraseOutput(i);
1290
+ }
1291
+
1292
+ TORCH_API Node* create(NodeKind kind, size_t num_outputs = 1);
1293
+ TORCH_API Node* create(
1294
+ NodeKind kind,
1295
+ ArrayRef<Value*> inputs,
1296
+ size_t num_outputs = 1);
1297
+
1298
+ TORCH_API Node* createNone();
1299
+ TORCH_API Node* createAutogradZero();
1300
+ TORCH_API Node* createUninitialized(TypePtr typ);
1301
+ TORCH_API Node* createWithSubgraph(Symbol kind);
1302
+ TORCH_API Node* createDifferentiableSubgraph();
1303
+ TORCH_API Node* createTuple(
1304
+ at::ArrayRef<Value*> values,
1305
+ TupleTypePtr optional_named_tuple = nullptr);
1306
+ TORCH_API Node* createTupleUnpack(Value* v);
1307
+ TORCH_API Node* createTupleIndex(
1308
+ Value* tup,
1309
+ Value* idx,
1310
+ const TypePtr& output_type);
1311
+ TORCH_API Node* createTupleSlice(
1312
+ Value* tup,
1313
+ int64_t beg,
1314
+ int64_t step_size,
1315
+ int64_t num_values);
1316
+ TORCH_API Node* createEnumName(Value* e);
1317
+ TORCH_API Node* createEnumValue(Value* e);
1318
+ TORCH_API Node* createList(
1319
+ const TypePtr& contained_type,
1320
+ at::ArrayRef<Value*> values);
1321
+ TORCH_API Node* createListUnpack(Value* v, size_t size);
1322
+ TORCH_API Node* createDict(
1323
+ const TypePtr& key_type,
1324
+ const TypePtr& value_type,
1325
+ at::ArrayRef<Value*> keys,
1326
+ at::ArrayRef<Value*> values);
1327
+ TORCH_API Node* createNumToTensor(Value* value);
1328
+ TORCH_API Node* createObject(const ClassTypePtr& type);
1329
+ TORCH_API Node* createSetAttr(
1330
+ Value* obj,
1331
+ const std::string& field,
1332
+ Value* newValue);
1333
+ TORCH_API Node* createGetAttr(Value* obj, const std::string& field);
1334
+ Value* insertGetAttr(Value* obj, const std::string& field) {
1335
+ return insertNode(createGetAttr(obj, field))->output();
1336
+ }
1337
+ TORCH_API Node* createStore(const std::string& name, Value* v);
1338
+ TORCH_API Node* createLoad(const std::string& name, const TypePtr& type);
1339
+ TORCH_API Node* createIsInstance(Value* v, at::ArrayRef<TypePtr> types);
1340
+
1341
+ TORCH_API Value* insertUncheckedCast(Value* v, TypePtr type);
1342
+
1343
+ // Insert a ToList operator with argument \p v and output type \p type.
1344
+ // \returns the output of the operation.
1345
+ TORCH_API Value* insertToList(Value* v, TypePtr type);
1346
+
1347
+ TORCH_API Value* insertFunctionCall(
1348
+ Function* callee,
1349
+ const MatchedSchema& matched);
1350
+ TORCH_API Value* insertMethodCall(
1351
+ std::string method_name,
1352
+ const MatchedSchema& matched);
1353
+
1354
+ // Note: defined in python_ir.cpp and can be used only in python extension
1355
+ Node* createPythonOp(
1356
+ THPObjectPtr&& pyobj,
1357
+ const std::string& cconv,
1358
+ pyobj_list&& scalar_args);
1359
+ // clone n, making a new node in _this_ graph.
1360
+ // use value_map to translate inputs of n to inputs of the cloned node
1361
+ // if copy_blocks is false, it will not recursively clone the nested blocks
1362
+ // this node contains.
1363
+ TORCH_API Node* createClone(
1364
+ Node* n,
1365
+ const std::function<Value*(Value*)>& value_map,
1366
+ bool copy_blocks = true);
1367
+
1368
+ // Insert constant IValue into the graph.
1369
+ TORCH_API Value* insertConstant(
1370
+ const IValue& val,
1371
+ c10::optional<SourceRange> loc = c10::nullopt,
1372
+ c10::optional<ScopePtr> scope = c10::nullopt);
1373
+
1374
+ // Schema-driven insert:
1375
+ // This inserts a node into the graph with inputs determined from args and
1376
+ // kwargs using Python argument matching rules, and checks that the op matches
1377
+ // a known schema.
1378
+ //
1379
+ // If this node successfully completes, it guarentees the node
1380
+ // is a correctly-formed invocation of opname
1381
+ TORCH_API Value* insert(
1382
+ Symbol opname,
1383
+ at::ArrayRef<NamedValue> args,
1384
+ at::ArrayRef<NamedValue> kwargs = {},
1385
+ const c10::optional<SourceRange>& range = {});
1386
+
1387
+ Node* appendNode(Node* n) {
1388
+ return block_->appendNode(n);
1389
+ }
1390
+
1391
+ Node* prependNode(Node* n) {
1392
+ return block_->prependNode(n);
1393
+ }
1394
+
1395
+ // insert before insert_before_ node
1396
+ // initialized to insert at the end of the top level block
1397
+ // can be changed with setInsertPoint()
1398
+ Node* insertNode(Node* n) {
1399
+ AT_ASSERT(
1400
+ insert_before_->inBlockList() &&
1401
+ "insert point node is no longer in a block list");
1402
+ return n->insertBefore(insert_before_);
1403
+ }
1404
+ // set where nodes are inserted to append to the end of this block
1405
+ void setInsertPoint(Block* b) {
1406
+ AT_ASSERT(b->owningGraph() == this);
1407
+ setInsertPoint(b->return_node());
1408
+ }
1409
+ // set where nodes are inserted to insert _before_ this node
1410
+ // for implementation simplicity we only support inserting before a node for
1411
+ // now
1412
+ void setInsertPoint(Node* n) {
1413
+ AT_ASSERT(n->owningGraph() == this && n->inBlockList());
1414
+ insert_before_ = n;
1415
+ predicted_insert_count_ = 0;
1416
+ }
1417
+ Node* insertPoint() {
1418
+ return insert_before_;
1419
+ }
1420
+
1421
+ // the top level block
1422
+ Block* block() {
1423
+ return block_;
1424
+ }
1425
+ const Block* block() const {
1426
+ return block_;
1427
+ }
1428
+
1429
+ // Checks well-formedness and invariants of graph
1430
+ TORCH_API void lint() const;
1431
+ // for use in debugger
1432
+ TORCH_API void dump() const;
1433
+
1434
+ TORCH_API ~Graph();
1435
+
1436
+ TORCH_API std::string toString(bool print_source_locations = true) const;
1437
+
1438
+ TORCH_API std::ostream& print(
1439
+ std::ostream& out,
1440
+ bool print_source_locations = true) const;
1441
+
1442
+ friend TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g);
1443
+
1444
+ TORCH_API std::shared_ptr<Graph> copy();
1445
+ TORCH_API std::unique_ptr<Graph> copyUnique();
1446
+ TORCH_API void remapTypes(const std::function<TypePtr(TypePtr)>& type_map);
1447
+
1448
+ private:
1449
+ friend TORCH_API void Lint(const AliasDb* db);
1450
+ TORCH_API void freeNode(Node* n);
1451
+ TORCH_API void freeValue(Value* v);
1452
+ TORCH_API void freeBlock(Block* b);
1453
+ void cloneFrom(Graph& src);
1454
+ };
1455
+
1456
+ /** \brief An utility class for setting temporary insertion points.
1457
+ *
1458
+ * When an object of this class is created, it stores the current insertion
1459
+ * point, sets the new one, and restores the original insertion point when the
1460
+ * object is destroyed.
1461
+ */
1462
+ struct WithInsertPoint {
1463
+ WithInsertPoint(Node* n) : prev_(n->owningGraph()->insertPoint()) {
1464
+ n->owningGraph()->setInsertPoint(n);
1465
+ }
1466
+ WithInsertPoint(Block* b) : WithInsertPoint(b->return_node()) {}
1467
+
1468
+ ~WithInsertPoint() {
1469
+ prev_->owningGraph()->setInsertPoint(prev_);
1470
+ }
1471
+
1472
+ private:
1473
+ Node* prev_;
1474
+ };
1475
+
1476
+ /** \brief An utility class for setting temporary scopes.
1477
+ *
1478
+ * When an object of this class is created, it stores the current scope, sets
1479
+ * the new one, and restores the original scope when the object is destroyed.
1480
+ */
1481
+ struct WithCurrentScope {
1482
+ WithCurrentScope(Graph& g, ScopePtr scope)
1483
+ : graph_(&g), prev_scope_(g.current_scope()) {
1484
+ g.set_current_scope(std::move(scope));
1485
+ }
1486
+ ~WithCurrentScope() {
1487
+ graph_->set_current_scope(prev_scope_);
1488
+ }
1489
+
1490
+ private:
1491
+ Graph* graph_;
1492
+ ScopePtr prev_scope_;
1493
+ };
1494
+
1495
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1496
+ inline Value::Value(Node* node_, size_t offset_)
1497
+ : node_(node_),
1498
+ offset_(offset_),
1499
+ unique_(node_->graph_->next_unique_++),
1500
+ type_(TensorType::get()) {
1501
+ node_->graph_->all_values.emplace(this);
1502
+ }
1503
+
1504
+ inline Value* Value::setType(TypePtr type) {
1505
+ AT_ASSERT(type);
1506
+ if (auto dyn = type->castRaw<c10::DynamicType>()) {
1507
+ type = dyn->fallback();
1508
+ }
1509
+ type_ = std::move(type);
1510
+ for (Use& use : uses_) {
1511
+ use.user->op_ = nullptr;
1512
+ }
1513
+ return this;
1514
+ }
1515
+
1516
+ inline Graph* Value::owningGraph() {
1517
+ return node()->owningGraph();
1518
+ }
1519
+
1520
+ inline const Graph* Value::owningGraph() const {
1521
+ return node()->owningGraph();
1522
+ }
1523
+
1524
+ /************* All nodes not required to be defined before Graph **************/
1525
+ struct ProfileOp : public Node {
1526
+ static const Symbol Kind;
1527
+ ProfileOp(Graph* graph, std::function<void(std::vector<IValue>&)> callback)
1528
+ : Node(graph, ::c10::prim::profile), callback_(std::move(callback)) {}
1529
+
1530
+ void cloneFrom(Node* other_) override;
1531
+ Node* allocNewInstance(Graph* g) override;
1532
+
1533
+ const std::function<void(std::vector<IValue>&)>& getCallback() const {
1534
+ return callback_;
1535
+ }
1536
+
1537
+ void setCallback(std::function<void(std::vector<IValue>&)> callback) {
1538
+ callback_ = std::move(callback);
1539
+ }
1540
+
1541
+ bool hasSeenTensor() const {
1542
+ return has_seen_tensor_;
1543
+ }
1544
+
1545
+ void setHasSeenTensor(bool has_seen_tensor) {
1546
+ has_seen_tensor_ = has_seen_tensor;
1547
+ }
1548
+
1549
+ private:
1550
+ std::function<void(std::vector<IValue>&)> callback_;
1551
+ bool has_seen_tensor_ = false;
1552
+ };
1553
+
1554
+ struct TORCH_API ProfileIValueOp : public Node {
1555
+ static const Symbol Kind;
1556
+ ProfileIValueOp(
1557
+ Graph* graph,
1558
+ std::function<void(std::vector<IValue>&)> callback)
1559
+ : Node(graph, ::c10::prim::profile_ivalue),
1560
+ callback_(std::move(callback)) {}
1561
+
1562
+ void cloneFrom(Node* other_) override;
1563
+ Node* allocNewInstance(Graph* g) override;
1564
+
1565
+ const std::function<void(std::vector<IValue>&)>& getCallback() const {
1566
+ return callback_;
1567
+ }
1568
+
1569
+ void setCallback(std::function<void(std::vector<IValue>&)> callback) {
1570
+ callback_ = std::move(callback);
1571
+ }
1572
+
1573
+ private:
1574
+ std::function<void(std::vector<IValue>&)> callback_;
1575
+ };
1576
+
1577
+ // execute a Python function, used for Ops we can't optimize but that we want to
1578
+ // optimize around
1579
+ //
1580
+ // Note: actual implementation (ConcretePythonOp) is defined in python_ir.cpp
1581
+ // which is not included in libtorch.so. We still include some bits and pieces
1582
+ // of PythonOp here to enable writing simple passes generically. In general,
1583
+ // python-aware bits need to be moved to the descendant classes.
1584
+ struct TORCH_API PythonOp : public Node {
1585
+ using Node::Node;
1586
+
1587
+ virtual std::string name() const = 0;
1588
+ virtual void writeScalars(std::ostream& out) const = 0;
1589
+ void cloneFrom(Node* other_) override = 0;
1590
+ Node* allocNewInstance(Graph* g) override = 0;
1591
+ // recover the autograd.Function instance, if this PythonOp's function
1592
+ // was originally SomeFunction.apply
1593
+ // used in ONNX for discovering symbolics
1594
+ virtual c10::optional<THPObjectPtr> autogradFunction() const = 0;
1595
+
1596
+ virtual void lint_python() const = 0;
1597
+ };
1598
+
1599
+ TORCH_API void LintGraph(const std::shared_ptr<Graph>& graph);
1600
+
1601
+ TORCH_API at::ArrayRef<Value*> createTupleUnpack(Value* v);
1602
+
1603
+ /** Insert graph \p CALLEE into graph \p G using \p INPUTS as input values.
1604
+ * The insertion happens at the current insertion point.
1605
+ * Optionally, one can also pass \p VALUE_MAP to get a map between \p CALLEE
1606
+ * values and their cloned copies in \p G.
1607
+ */
1608
+ TORCH_API std::vector<Value*> insertGraph(
1609
+ Graph& g,
1610
+ Graph& callee,
1611
+ ArrayRef<Value*> inputs);
1612
+ TORCH_API std::vector<Value*> insertGraph(
1613
+ Graph& g,
1614
+ Graph& callee,
1615
+ ArrayRef<Value*> inputs,
1616
+ std::unordered_map<Value*, Value*>& value_map);
1617
+
1618
+ /** Insert function \p CALLEE after node \p TO_REPLACE, remove the node and
1619
+ * replace all its uses with corresponding outputs of the inserted function.
1620
+ * This asserts that the number of outputs of the original node and the
1621
+ * graph are the same.
1622
+ */
1623
+ TORCH_API std::vector<Value*> inlineCallTo(
1624
+ Node* to_replace,
1625
+ GraphFunction* callee,
1626
+ bool use_graph = true);
1627
+
1628
+ TORCH_API std::vector<Value*> inlineCallTo(
1629
+ Node* to_replace,
1630
+ GraphFunction* callee,
1631
+ Graph* callee_graph);
1632
+
1633
+ /** If there is only one value in \p OUTPUTS and its kind is Tuple, insert a
1634
+ * tuple unpack node and return the resulting values.
1635
+ */
1636
+ TORCH_API std::vector<Value*> unpackOutputs(const std::vector<Value*>& outputs);
1637
+
1638
+ TORCH_API std::vector<Node*> findAllNodes(Graph& g, Symbol kind, bool recurse);
1639
+ TORCH_API std::vector<Node*> findAllNodes(Block& b, Symbol kind, bool recurse);
1640
+ TORCH_API std::vector<Node*> findAllNodes(
1641
+ at::ArrayRef<Block*> a,
1642
+ Symbol kind,
1643
+ bool recurse);
1644
+
1645
+ struct TORCH_API OperatorSet {
1646
+ OperatorSet(std::initializer_list<const char*> sig_literals);
1647
+ std::vector<std::shared_ptr<Operator>> getOps() const;
1648
+ void insert(std::initializer_list<const char*> sig_literals);
1649
+
1650
+ private:
1651
+ friend struct Node;
1652
+ std::unordered_map<Symbol, std::vector<std::shared_ptr<Operator>>> ops;
1653
+ };
1654
+
1655
+ template <typename T>
1656
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1657
+ struct OperatorMap {
1658
+ // Type aliasing
1659
+ using OpMapType = typename std::pair<std::shared_ptr<Operator>, T>;
1660
+ using ValueType = std::vector<OpMapType>;
1661
+ using MapType = std::unordered_map<Symbol, ValueType>;
1662
+
1663
+ OperatorMap() = default;
1664
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1665
+ explicit OperatorMap(
1666
+ std::initializer_list<std::pair<std::shared_ptr<Operator>, T>> init) {
1667
+ insert(init);
1668
+ }
1669
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1670
+ explicit OperatorMap(std::initializer_list<std::pair<const char*, T>> init) {
1671
+ insert(init);
1672
+ }
1673
+
1674
+ void insert(const std::shared_ptr<Operator>& op, T val) {
1675
+ // Remove if exists before insert
1676
+ erase(op);
1677
+ map[Symbol::fromQualString(op->schema().name())].emplace_back(
1678
+ std::make_pair(op, val));
1679
+ }
1680
+
1681
+ void insert(const OperatorSet& op_set, T val) {
1682
+ for (auto& op : op_set.getOps()) {
1683
+ insert(op, val);
1684
+ }
1685
+ }
1686
+
1687
+ void insert(
1688
+ std::initializer_list<std::pair<std::shared_ptr<Operator>, T>> v) {
1689
+ for (auto& el : v) {
1690
+ insert(el.first, el.second);
1691
+ }
1692
+ }
1693
+
1694
+ void insert(std::initializer_list<std::pair<const char*, T>> v) {
1695
+ for (auto& el : v) {
1696
+ insert(getOperatorForLiteral(el.first), el.second);
1697
+ }
1698
+ }
1699
+
1700
+ void erase(const std::shared_ptr<Operator>& op) {
1701
+ auto it = map.find(Symbol::fromQualString(op->schema().name()));
1702
+ if (it == map.end()) {
1703
+ return;
1704
+ }
1705
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1706
+ if (vit->first->schema() == op->schema()) {
1707
+ it->second.erase(vit);
1708
+ break;
1709
+ }
1710
+ }
1711
+ if (it->second.size() == 0) {
1712
+ map.erase(Symbol::fromQualString(op->schema().name()));
1713
+ }
1714
+ }
1715
+
1716
+ bool contains(const Operator& op) const {
1717
+ const auto it = map.find(Symbol::fromQualString(op.schema().name()));
1718
+ if (it == map.end()) {
1719
+ return false;
1720
+ }
1721
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1722
+ if (vit->first->schema() == op.schema()) {
1723
+ return true;
1724
+ }
1725
+ }
1726
+ return false;
1727
+ }
1728
+
1729
+ bool contains(const Node* n) const {
1730
+ return n->maybeOperator() && contains(n->getOperator());
1731
+ }
1732
+
1733
+ c10::optional<T> find(const Operator& op) {
1734
+ const auto it = map.find(Symbol::fromQualString(op.schema().name()));
1735
+ if (it == map.end()) {
1736
+ return c10::nullopt;
1737
+ }
1738
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1739
+ if (vit->first->schema() == op.schema()) {
1740
+ return vit->second;
1741
+ }
1742
+ }
1743
+ return c10::nullopt;
1744
+ }
1745
+
1746
+ // TODO: return iterator
1747
+ std::vector<OpMapType> getAllKeysAndValues() const {
1748
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
1749
+ std::vector<OpMapType> keys_values;
1750
+ for (auto& symbol_mapping : map) {
1751
+ auto& vec = symbol_mapping.second;
1752
+ for (auto& pair : vec) {
1753
+ keys_values.push_back(pair);
1754
+ }
1755
+ }
1756
+ return keys_values;
1757
+ }
1758
+
1759
+ private:
1760
+ friend struct Node;
1761
+ MapType map;
1762
+ };
1763
+
1764
+ template <typename T>
1765
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
1766
+ struct FunctionSchemaMap {
1767
+ // Type aliasing
1768
+ using FuncSchemaMapType = typename std::pair<FunctionSchema, T>;
1769
+ using ValueType = std::vector<FuncSchemaMapType>;
1770
+ using MapType = std::unordered_map<Symbol, ValueType>;
1771
+
1772
+ FunctionSchemaMap() = default;
1773
+ void insert(const FunctionSchema& schema, T val) {
1774
+ // Remove if exists before insert
1775
+ erase(schema);
1776
+ map[Symbol::fromQualString(schema.name())].emplace_back(
1777
+ std::make_pair(schema, val));
1778
+ }
1779
+
1780
+ void erase(const FunctionSchema& schema) {
1781
+ auto it = map.find(Symbol::fromQualString(schema.name()));
1782
+ if (it == map.end()) {
1783
+ return;
1784
+ }
1785
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1786
+ if (vit->first == schema) {
1787
+ it->second.erase(vit);
1788
+ break;
1789
+ }
1790
+ }
1791
+ if (it->second.size() == 0) {
1792
+ map.erase(Symbol::fromQualString(schema.name()));
1793
+ }
1794
+ }
1795
+
1796
+ bool contains(const FunctionSchema& schema) const {
1797
+ const auto it = map.find(Symbol::fromQualString(schema.name()));
1798
+ if (it == map.end()) {
1799
+ return false;
1800
+ }
1801
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1802
+ if (vit->first->schema() == schema) {
1803
+ return true;
1804
+ }
1805
+ }
1806
+ return false;
1807
+ }
1808
+
1809
+ c10::optional<T> find(const FunctionSchema& schema) const {
1810
+ const auto it = map.find(Symbol::fromQualString(schema.name()));
1811
+ if (it == map.end()) {
1812
+ return c10::nullopt;
1813
+ }
1814
+ for (auto vit = it->second.begin(); vit != it->second.end(); ++vit) {
1815
+ if (vit->first == schema) {
1816
+ return vit->second;
1817
+ }
1818
+ }
1819
+ return c10::nullopt;
1820
+ }
1821
+
1822
+ // TODO: return iterator
1823
+ std::vector<FuncSchemaMapType> getAllKeysAndValues() const {
1824
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
1825
+ std::vector<FuncSchemaMapType> keys_values;
1826
+ for (auto& symbol_mapping : map) {
1827
+ auto& vec = symbol_mapping.second;
1828
+ for (auto& pair : vec) {
1829
+ keys_values.push_back(pair);
1830
+ }
1831
+ }
1832
+ return keys_values;
1833
+ }
1834
+
1835
+ private:
1836
+ friend struct Node;
1837
+ MapType map;
1838
+ };
1839
+
1840
+ } // namespace jit
1841
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir_views.h ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ struct IfView {
10
+ explicit IfView(Node* node) : node_(node) {
11
+ AT_ASSERT(node->kind() == ::c10::prim::If);
12
+ }
13
+ Value* cond() const {
14
+ return node_->input(0);
15
+ }
16
+ Block* thenBlock() const {
17
+ return node_->blocks().at(0);
18
+ }
19
+ Block* elseBlock() const {
20
+ return node_->blocks().at(1);
21
+ }
22
+ ArrayRef<Value*> thenOutputs() const {
23
+ return thenBlock()->outputs();
24
+ }
25
+ ArrayRef<Value*> elseOutputs() const {
26
+ return elseBlock()->outputs();
27
+ }
28
+ ArrayRef<Value*> outputs() const {
29
+ return node_->outputs();
30
+ }
31
+ Node* node() const {
32
+ return node_;
33
+ }
34
+ operator Node*() const {
35
+ return node_;
36
+ }
37
+
38
+ void permuteOutputs(const std::vector<size_t>& new_output_order) {
39
+ node_->permuteOutputs(new_output_order);
40
+ thenBlock()->permuteOutputs(new_output_order);
41
+ elseBlock()->permuteOutputs(new_output_order);
42
+ }
43
+
44
+ private:
45
+ Node* node_;
46
+ };
47
+
48
+ struct LoopView {
49
+ explicit LoopView(Node* node) : node_(node) {
50
+ AT_ASSERT(
51
+ node->kind() == ::c10::prim::Loop || node->kind() == ::c10::onnx::Loop);
52
+ }
53
+ Block* bodyBlock() const {
54
+ return node_->blocks().at(0);
55
+ }
56
+ Value* cond() const {
57
+ return node_->input(0);
58
+ }
59
+ Value* maxTripCount() const {
60
+ return node_->input(0);
61
+ }
62
+ Value* inputCond() const {
63
+ return node_->input(1);
64
+ }
65
+ Value* nextCond() const {
66
+ return bodyBlock()->outputs().at(0);
67
+ }
68
+ Value* currentTripCount() const {
69
+ return bodyBlock()->inputs().at(0);
70
+ }
71
+ ArrayRef<Value*> carriedInputs() const {
72
+ // skip trip count and cond
73
+ return node_->inputs().slice(2);
74
+ }
75
+ ArrayRef<Value*> carriedInputsWithCond() const {
76
+ // skip trip count and cond
77
+ return node_->inputs().slice(1);
78
+ }
79
+ ArrayRef<Value*> carriedOutputs() const {
80
+ return node_->outputs();
81
+ }
82
+ ArrayRef<Value*> bodyCarriedInputs() const {
83
+ // skip trip count and cond
84
+ return bodyBlock()->inputs().slice(1);
85
+ }
86
+ ArrayRef<Value*> bodyCarriedOutputs() const {
87
+ return bodyBlock()->outputs().slice(1);
88
+ }
89
+ Node* node() const {
90
+ return node_;
91
+ }
92
+ operator Node*() const {
93
+ return node_;
94
+ }
95
+
96
+ void permuteLoopCarried(const std::vector<size_t>& new_output_order) {
97
+ node_->permuteOutputs(new_output_order);
98
+ // skip trip count and cond
99
+ node_->permuteInputs(adjustIndices(2, new_output_order));
100
+ auto adjusted_block_order = adjustIndices(1, new_output_order);
101
+ bodyBlock()->permuteOutputs(adjusted_block_order);
102
+ bodyBlock()->permuteInputs(adjusted_block_order);
103
+ }
104
+
105
+ void replaceMaxTripCount(Value* new_max_trip_count) {
106
+ node_->replaceInput(0, new_max_trip_count);
107
+ }
108
+ void replaceInputCondition(Value* new_input_condition) {
109
+ node_->replaceInput(1, new_input_condition);
110
+ }
111
+
112
+ // our way of encoding loops makes them difficult to turn back into python
113
+ // syntax. we have to check properties of the condition and trip count inputs
114
+ // to figure out which one it initially was. ModifiedLoops are not directly
115
+ // mappable to either For or While
116
+ enum LoopType { While, For, ModifiedLoop };
117
+
118
+ LoopType loopType() {
119
+ auto trip_count = toIValue(maxTripCount());
120
+ auto cond_input = toIValue(inputCond());
121
+ auto cond_next = toIValue(nextCond());
122
+
123
+ bool condition_is_always_true =
124
+ cond_input && cond_input->toBool() && cond_next && cond_next->toBool();
125
+ bool trip_count_is_specified = !trip_count || // trip is not a constant
126
+ trip_count->toInt() !=
127
+ std::numeric_limits<int64_t>::max() || // it is a constant but not
128
+ // the default one
129
+ !currentTripCount()
130
+ ->uses()
131
+ .empty(); // it is actually being used in the body.
132
+
133
+ if (condition_is_always_true) {
134
+ // if the trip count was not specified this was a user-written while True:
135
+ return trip_count_is_specified ? For : While;
136
+ } else {
137
+ if (trip_count_is_specified) {
138
+ return ModifiedLoop;
139
+ }
140
+ return While;
141
+ }
142
+ }
143
+
144
+ private:
145
+ Node* node_;
146
+
147
+ // adjust index_ordering by adding indices 0 - thorugh adjust, and
148
+ // incrementing all existing inputs by adjust
149
+ static std::vector<size_t> adjustIndices(
150
+ size_t adjust,
151
+ const std::vector<size_t>& index_ordering) {
152
+ std::vector<size_t> adjusted;
153
+ adjusted.reserve(adjust + index_ordering.size());
154
+ for (const auto i : c10::irange(adjust)) {
155
+ adjusted.push_back(i);
156
+ }
157
+ for (auto index : index_ordering) {
158
+ adjusted.push_back(index + adjust);
159
+ }
160
+ return adjusted;
161
+ }
162
+ };
163
+ } // namespace jit
164
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/irparser.h ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <string>
5
+ #include <unordered_map>
6
+
7
+ #include <c10/util/Optional.h>
8
+ #include <torch/csrc/Export.h>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+
13
+ struct Graph;
14
+ struct Value;
15
+
16
// \brief Parse IR from \p str, constructing the corresponding IR in \p graph.
// If \p parse_tensor_constants is true, empty tensors with random or
// uninitialized contents will be constructed for Tensor constants; otherwise
// encountering a Tensor constant will throw.
TORCH_API void parseIR(
    const std::string& str,
    torch::jit::Graph* graph,
    bool parse_tensor_constants = false);

/** \brief Parse IR from \p str, constructing the corresponding IR in \p graph.
 *
 * \p vmap is filled with string-to-Value pairs, allowing Values in the newly
 * created graph to be indexed by their name in the original IR string.
 * If \p parse_tensor_constants is true, empty tensors with random or
 * uninitialized contents will be constructed for Tensor constants; otherwise
 * encountering a Tensor constant will throw.
 */
TORCH_API void parseIR(
    const std::string& str,
    torch::jit::Graph* graph,
    std::unordered_map<std::string, Value*>& vmap,
    bool parse_tensor_constants = false);
38
+
39
+ } // namespace jit
40
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/named_value.h ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/ivalue.h>
3
+ #include <torch/csrc/jit/frontend/source_range.h>
4
+ #include <torch/csrc/jit/ir/constants.h>
5
+ #include <torch/csrc/utils/variadic.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ struct Value;
11
+
12
+ /**
13
+ * A value with optional extra name and location information. Used during
14
+ * schema matching to provide extra error information and resolve kwargs.
15
+ */
16
/**
 * A value with optional extra name and location information. Used during
 * schema matching to provide extra error information and resolve kwargs.
 *
 * Internally holds either a Value* (graph value) or an IValue (constant);
 * `ivalue_` is only meaningful when `value_` is nullptr.
 */
struct NamedValue {
  NamedValue(const SourceRange& loc, const std::string& name, Value* value)
      : loc_(loc), name_(name), value_(value) {}
  NamedValue(const SourceRange& loc, Value* value) : loc_(loc), value_(value) {}

  /* implicit */ NamedValue(Value* value) : value_(value) {}
  NamedValue(const std::string& name, Value* value)
      : name_(name), value_(value) {}

  /* implicit */ NamedValue(IValue value)
      : value_(nullptr), ivalue_(std::move(value)) {}

  NamedValue(const std::string& name, IValue value)
      : name_(name), ivalue_(std::move(value)) {}

  // Catch-all implicit constructor: anything that is not already a NamedValue,
  // Value*, or IValue is converted through IValue. SFINAE keeps this from
  // hijacking the dedicated overloads above.
  template <
      typename T,
      typename = enable_if_t<
          (!std::is_same<decay_t<T>, NamedValue>::value &&
           !std::is_same<decay_t<T>, Value*>::value &&
           !std::is_same<decay_t<T>, IValue>::value)>>
  // NOLINTNEXTLINE(bugprone-forwarding-reference-overload)
  NamedValue(T&& t) : NamedValue(IValue(std::forward<T>(t))) {}

  // Named variant of the catch-all constructor above.
  template <
      typename T,
      typename = enable_if_t<
          (!std::is_same<decay_t<T>, Value*>::value &&
           !std::is_same<decay_t<T>, IValue>::value)>>
  NamedValue(const std::string& name, T&& t)
      : NamedValue(name, IValue(std::forward<T>(t))) {}

  // Returns the stored source range, or `backup_location` when none was set.
  SourceRange locOr(const SourceRange& backup_location) const {
    if (!loc_)
      return backup_location;
    return loc();
  }

  // note: this will insert a constant node into the graph at the current
  // insert point if this NamedValue is actually a constant
  Value* value(Graph& g) const {
    if (!value_)
      return insertConstant(
          g, ivalue_); // use insertConstant to remove need to include ir.h here
    return value_;
  }

  // Name accessor; asserts that a name was provided.
  const std::string& name() const {
    AT_ASSERT(name_);
    return *name_;
  }

  // Location accessor; asserts that a source range was provided
  // (use locOr() for a non-asserting variant).
  const SourceRange& loc() const {
    AT_ASSERT(loc_);
    return *loc_;
  }

  at::TypePtr type() const;

 private:
  c10::optional<SourceRange> loc_;
  c10::optional<std::string> name_;
  Value* value_{nullptr};
  // only valid if value_ == nullptr;
  IValue ivalue_;
};
82
+
83
+ } // namespace jit
84
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/node_hashing.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
// Hash functor over Node*, intended for hash-based containers keyed on IR
// nodes (implementation in node_hashing.cpp).
struct TORCH_API HashNode {
  size_t operator()(const Node* k) const;
};

// Equality functor over Node*, the companion of HashNode for hash-based
// containers (implementation in node_hashing.cpp).
struct TORCH_API EqualNode {
  bool operator()(const Node* lhs, const Node* rhs) const;
};
15
+
16
+ } // namespace jit
17
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/scope.h ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/jit_type.h>
3
+ #include <ATen/core/symbol.h>
4
+ #include <c10/util/Optional.h>
5
+ #include <c10/util/intrusive_ptr.h>
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/jit/frontend/source_range.h>
8
+ #include <unordered_map>
9
+
10
+ namespace torch {
11
+ namespace jit {
12
+ struct ModuleInstanceInfo;
13
+ constexpr size_t kModuleInstanceInfo = 2;
14
+
15
+ namespace utils {
16
+ std::string get_module_info(const ModuleInstanceInfo& module_instance_info);
17
+ } // namespace utils
18
+
19
+ // Scope is a node of a trie that represents the tree of nested scopes.
20
+ // Individual scopes are pushed and popped from Graph, which holds a
21
+ // pointer to the current scope. Each Node in Graph holds a pointer
22
+ // to the scope that was current when the node was created.
23
+ // The trie never needs to shrink, it only grows until it is disposed
24
+ // of when Graph is deallocated. Hence, pointers to scopes held by nodes
25
+ // will always be valid as long as Graph is alive.
26
+ struct Scope;
27
+ using ScopePtr = c10::intrusive_ptr<Scope>;
28
+ using c10::Symbol;
29
+
30
// A node in the trie of nested scopes; see the block comment above for the
// ownership/lifetime model. All non-trivial members are defined in scope.cpp.
struct TORCH_API Scope : public c10::intrusive_ptr_target {
 private:
  ScopePtr parent_;
  Symbol name_;
  // Reclaims an owning ScopePtr to *this.
  ScopePtr intrusive_from_this();

 public:
  Scope();

  Scope(ScopePtr parent, Symbol name);

  // Creates a child scope with the given name (presumably returning it —
  // confirm in scope.cpp).
  ScopePtr push(Symbol name);

  ScopePtr parent();

  // Whether this scope is the root of the trie (see scope.cpp for the exact
  // criterion).
  bool isRoot() const;

  bool isBlank() const;

  // Walks up the parent chain to the root scope.
  ScopePtr getRoot();

  // Depth of this scope in the trie (inclusive/exclusive convention defined
  // in scope.cpp).
  size_t getDepth();

  Symbol name() const;

  // Scope names joined from the root down to this scope using `separator`.
  std::string namesFromRoot(const std::string& separator = "/") const;
};
57
+
58
+ struct Function;
59
+ struct InlinedCallStack;
60
+
61
+ /**
62
+ * ModuleInstanceInfo is a structure to include the module type and instance
63
+ * name. It also provide public methods to get the pointer to module type and
64
+ * instance name.
65
+ *
66
+ * This structure is mainly used as a private member in InlinedCallStack, such
67
+ * that one can follow the callstack to find the relevant module hierarchy.
68
+ */
69
+ struct ModuleInstanceInfo {
70
+ private:
71
+ c10::ClassTypePtr module_type_{nullptr};
72
+ std::string instance_name_;
73
+
74
+ public:
75
+ ModuleInstanceInfo() = default;
76
+ ModuleInstanceInfo(c10::ClassTypePtr module_type, std::string instance_name);
77
+ c10::ClassTypePtr class_type() {
78
+ return module_type_;
79
+ }
80
+ c10::ClassTypePtr class_type() const {
81
+ return module_type_;
82
+ }
83
+ std::string instance_name() const {
84
+ return instance_name_;
85
+ }
86
+
87
+ bool operator==(const ModuleInstanceInfo& rhs) const {
88
+ return (class_type() == rhs.class_type()) &&
89
+ (instance_name() == rhs.instance_name());
90
+ }
91
+ };
92
+
93
+ /**
94
+ * InlinedCallStack is an element in a list representing callstack of functions
95
+ * that have been inlined.
96
+ *
97
+ * Each such element holds info about the current callsite (Function and
98
+ * SourceRange) and a pointer to the next element in the list. The last element
99
+ * in the list represents the innermost function that was inlined.
100
+ *
101
+ * For instance, if a node has a callstack
102
+ * [foo, source_range1] -> [bar, source_range2]
103
+ * it means that this node was originally from function 'bar' that was called
104
+ * at 'source_range2' in function 'foo' that was called in the current function
105
+ * at 'source_range1'.
106
+ *
107
+ * If a node did not come from any inlined function, its callstack will be
108
+ * empty.
109
+ *
110
+ * The callstack lists only grow, we never remove elements from them, which
111
+ * allows us to reuse same elements in different lists. For instance, if we
112
+ * inline function 'bar' to 'foo' and then inline 'foo' to two functions 'ham'
113
+ * and 'baz', the callstacks would look like:
114
+ *
115
+ * [baz, source_range3] --
116
+ * \
117
+ * --> [foo, source_range1] -> [bar, source_range2]
118
+ * /
119
+ * [ham, source_range4] --
120
+ */
121
+ using InlinedCallStackPtr = c10::intrusive_ptr<InlinedCallStack>;
122
+ using InlinedCallStackEntry =
123
+ std::tuple<Function*, SourceRange, c10::optional<ModuleInstanceInfo>>;
124
+
125
+ struct TORCH_API InlinedCallStack : public c10::intrusive_ptr_target {
126
+ private:
127
+ c10::optional<InlinedCallStackPtr> callee_;
128
+ Function* fn_;
129
+ // Reason for fn_name_ even though we have fn_
130
+ // Serialized callstack is used in circustmances where InlinedCallstack
131
+ // cannot be constructed during runtime, e.g. mobile runtime or
132
+ // delegated backends.
133
+ // Since in those cases we do not have Function* we store function name
134
+ // fn_name does not give you access to the same information that Function*
135
+ // does, however in mobile/delegated backend runtime we use InlindedCallStack
136
+ // for exception stack and for that purpose fn_name_ suffices.
137
+ const std::string fn_name_;
138
+ SourceRange source_range_;
139
+ InlinedCallStackPtr intrusive_from_this();
140
+ c10::optional<ModuleInstanceInfo> module_instance_info_;
141
+
142
+ public:
143
+ // Constructor for a leaf callstack node.
144
+ InlinedCallStack(Function* fn, SourceRange source_range);
145
+
146
+ // Constructor for a leaf callstack node.
147
+ InlinedCallStack(
148
+ Function* fn,
149
+ SourceRange source_range,
150
+ c10::optional<ModuleInstanceInfo> module_instance_info);
151
+
152
+ // Constructor for a leaf callstack node.
153
+ InlinedCallStack(
154
+ Function* fn,
155
+ SourceRange source_range,
156
+ c10::optional<ModuleInstanceInfo> module_instance_info,
157
+ std::string& function_name);
158
+
159
+ // Constructor for an inner callstack node.
160
+ InlinedCallStack(
161
+ InlinedCallStackPtr callee,
162
+ Function* fn,
163
+ SourceRange source_range);
164
+
165
+ InlinedCallStack(
166
+ InlinedCallStackPtr callee,
167
+ Function* fn,
168
+ SourceRange source_range,
169
+ c10::optional<ModuleInstanceInfo> module_instance_info);
170
+
171
+ InlinedCallStack(
172
+ InlinedCallStackPtr callee,
173
+ Function* fn,
174
+ SourceRange source_range,
175
+ c10::optional<ModuleInstanceInfo> module_instance_info,
176
+ std::string& function_name);
177
+
178
+ // Return next element in the callstack list.
179
+ c10::optional<InlinedCallStackPtr> callee() const;
180
+
181
+ // Return module instance associated with the current element.
182
+ c10::optional<ModuleInstanceInfo> module_instance() const;
183
+
184
+ // Returns the source range of the node
185
+ SourceRange source_range() const;
186
+
187
+ Function* function() const;
188
+
189
+ const std::string& function_name() const;
190
+
191
+ // Return callstack as a vector of [Function, SourceRange] pairs.
192
+ std::vector<InlinedCallStackEntry> vec();
193
+
194
+ void setCallee(c10::optional<InlinedCallStackPtr>);
195
+
196
+ bool operator==(const InlinedCallStack& rhs) const {
197
+ // No need to compare fn_, since source_range equivalence check
198
+ // should suffice.
199
+ return (module_instance().has_value() ==
200
+ rhs.module_instance().has_value()) &&
201
+ (module_instance().has_value() &&
202
+ module_instance().value() == rhs.module_instance().value()) &&
203
+ callee() == rhs.callee() && source_range() == rhs.source_range();
204
+ }
205
+
206
+ bool operator!=(const InlinedCallStack& rhs) const {
207
+ return !(*this == rhs);
208
+ }
209
+ };
210
+
211
// {source range, node name, InlinedCallStack}
// We store the node name because the same debug info will be used for
// profiling as well, so we need to know op names too.
using DebugInfoTuple =
    std::tuple<SourceRange, std::string, InlinedCallStackPtr>;
// Indices into a DebugInfoTuple, matching the field order above.
constexpr size_t kDebugInfoTupleSourceRangeIndex{0};
constexpr size_t kDebugInfoTupleNodeNameIndex{1};
constexpr size_t kDebugInfoTupleInlinedCSIndex{2};
219
+ } // namespace jit
220
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/type_hashing.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
// Hash functor over JIT TypePtr (both mutable and const variants), for use in
// hash-based containers keyed on types (implementation in type_hashing.cpp).
struct HashType {
  size_t operator()(const TypePtr& type) const;
  size_t operator()(const c10::ConstTypePtr& type) const;
};

// Equality functor over JIT TypePtr, companion of HashType
// (implementation in type_hashing.cpp).
struct EqualType {
  bool operator()(const TypePtr& a, const TypePtr& b) const;
  bool operator()(const c10::ConstTypePtr& a, const c10::ConstTypePtr& b) const;
};
18
+
19
+ } // namespace jit
20
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/clear_undefinedness.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <torch/csrc/Export.h>
7
+ #include <torch/csrc/jit/ir/ir.h>
8
+
9
+ namespace torch {
10
+ namespace jit {
11
+
12
// Undefinedness makes argument matching fail for regular tensor operations
// if 1+ arguments are undefined or possibly undefined tensors.
// Technically, undefined tensors are **not** tensors, as the regular tensor
// operations do not know how to handle them.
// However, in practice, there are guards and conversion operators that
// **always** gate regular operations if undefined tensors may be present.
// Eventually, we would love to move to a world where we use optionals
// in lieu of undefined tensors.
// When that happens, this pass will be removed.
// (Exact transformation performed is defined in clear_undefinedness.cpp.)
TORCH_API void ClearUndefinedness(const std::shared_ptr<Graph>& graph);
22
+
23
+ } // namespace jit
24
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/common_subexpression_elimination.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
// Performs common-subexpression elimination on `graph`. The bool return
// presumably indicates whether the graph was modified — confirm against the
// implementation in common_subexpression_elimination.cpp.
TORCH_API bool EliminateCommonSubexpression(
    const std::shared_ptr<Graph>& graph);
10
+ }
11
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/device_type_analysis.h ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+ struct Graph;
8
+
9
// Propagates Device type info throughout the given graph.
// The bool return presumably indicates whether any types were changed —
// confirm against device_type_analysis.cpp.
TORCH_API bool DeviceTypePropagation(std::shared_ptr<Graph>& graph);
11
+
12
+ } // namespace jit
13
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dtype_analysis.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <memory>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+ struct Graph;
10
+
11
// Propagate tensor properties (e.g., dtype, device, is_contiguous, layout)
// across all tensor objects. Currently, only dtype propagation is supported.
// The bool return presumably indicates whether the graph was modified —
// confirm against dtype_analysis.cpp.
TORCH_API bool DtypePropagation(std::shared_ptr<Graph>& graph);
15
+
16
+ } // namespace jit
17
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/guard_elimination.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/jit/ir/ir.h>
9
+
10
+ #include <list>
11
+ #include <vector>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
// Removes guard nodes deemed redundant (criteria defined in
// guard_elimination.cpp). Note: the shared_ptr is taken by value, so callers
// pay one refcount bump per call.
TORCH_API void EliminateRedundantGuards(std::shared_ptr<Graph> graph);
17
+
18
+ } // namespace jit
19
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_autodiff_subgraphs.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
// Whether `node` can be executed under autograd (see
// inline_autodiff_subgraphs.cpp for the criteria).
TORCH_API bool canRunWithAutograd(Node* node);

// Inlines autodiff subgraphs back into `graph`. `threshold` presumably bounds
// the subgraph size below which inlining happens — confirm against
// inline_autodiff_subgraphs.cpp.
TORCH_API void InlineAutodiffSubgraphs(
    std::shared_ptr<Graph>& graph,
    size_t threshold = 5);
13
+
14
+ } // namespace jit
15
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inline_forked_closures.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API void inlineForkedClosures(std::shared_ptr<Graph>& to_clean);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_dropout.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
// Removes dropout nodes from `graph` (see remove_dropout.cpp for which node
// kinds are removed and under what conditions).
TORCH_API void removeDropout(std::shared_ptr<Graph>& graph);

// Convenience overload operating on a whole script::Module.
TORCH_API void removeDropout(script::Module& module);
12
+
13
+ } // namespace jit
14
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ #include <memory>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
// see remove_inplace_ops.cpp for docs
TORCH_API void RemoveInplaceOps(const std::shared_ptr<Graph>& graph);

// Inserts implicit casts for binary in-place ops within `block`
// (exact behavior defined in remove_inplace_ops.cpp).
TORCH_API void ImplicitCastForBinaryInplaceOps(Block* block);
13
+ } // namespace jit
14
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/tensorexpr_fuser.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <memory>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
// Run the TensorExpressions-based fuser.
// If add_composed_op is true, creates a single operation that
// performs both the runtime check that types align
// and the dispatch to the kernel/unoptimized graph.
// `min_group_size` is the minimum number of nodes required to form a fusion
// group; `fuse_to_dynamic_shapes` enables dynamic-shape fusion (see
// tensorexpr_fuser.cpp).
TORCH_API void FuseTensorExprs(
    std::shared_ptr<Graph>& graph,
    size_t min_group_size = 2,
    bool add_composed_op = false,
    bool fuse_to_dynamic_shapes = false);

// Global toggles/queries for the TE fuser and its sub-features
// (state lives in tensorexpr_fuser.cpp).
TORCH_API void setTensorExprFuserEnabled(bool val);
TORCH_API bool tensorExprFuserEnabled();
TORCH_API void setTensorExprDynamicShapeFusionEnabled(bool val);
TORCH_API bool tensorExprDynamicShapeFusionEnabled();
TORCH_API bool setTexprReductionsEnabled(bool value);
TORCH_API bool texprReductionsEnabled();

// Removes profiling nodes and bakes their observed types into the graph;
// the specializations can later be stripped again with the functions below.
TORCH_API void RemoveProfileNodesAndSpecializeTypes(
    std::shared_ptr<Graph>& graph);
TORCH_API bool hasTensorTypeSpecialization(Value* v);
TORCH_API void RemoveTensorTypeSpecializations(std::shared_ptr<Graph>& graph);
TORCH_API void removeTensorTypeSpecializations(Block* block);

// Callback used to transform each input's TensorType before it is embedded in
// a TypeCheck guard (e.g. to erase irrelevant aspects of the type).
using tensor_type_converter_t =
    c10::function_ref<TensorTypePtr(const TensorTypePtr& t)>;

// inserts a TypeCheck pattern
//
// around the guarded node that has a Subgraph attribute, this inserts a pattern
//
//   if TypeCheck(...):
//     guarded_node
//   else:
//     FallbackGraph(...)
//
// The TypeCheck includes the types of all Tensor inputs to the guarded_node,
// as processed by the type_converter, a lambda
// TensorTypePtr(const TensorTypePtr& t). This allows erasing irrelevant
// aspects of the type.
//
// The Fallback graph will have the same subgraph as the guarded node (with the
// expectation that the guarded_node's subgraph will then be optimized).
TORCH_API void insertTypeGuard(
    Node* guarded_node,
    tensor_type_converter_t type_converter,
    c10::Symbol kind);

TORCH_API bool usedOnlyInSize(Value* v);
TORCH_API Value* broadcastSizes(at::ArrayRef<Value*> sizes, AliasDb* db);

namespace tensorexpr {
// Whether NNC/TE can fuse `node` (criteria in tensorexpr_fuser.cpp).
TORCH_API bool isSupported(Node* node);

/// Get the modifiable custom operator set object.
///
/// For static shapes, if a custom operator has been added to the custom
/// operator set, it will be pulled into the NNC fusion group. But it doesn't
/// work with dynamic shapes unless the shape function is explicitly registered
/// via `torch::jit::RegisterShapeComputeGraphForSchema` for the custom
/// operator.
///
/// @return Reference to the custom operator set
///
TORCH_API OperatorSet& getCustomOperatorSet();
} // namespace tensorexpr
74
+ } // namespace jit
75
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h ADDED
@@ -0,0 +1,511 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/jit_type.h>
4
+ #include <ATen/core/stack.h>
5
+ #include <c10/util/hash.h>
6
+ #include <c10/util/irange.h>
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/autograd/variable.h>
9
+ #include <torch/csrc/jit/ir/ir.h>
10
+ #include <ostream>
11
+ #include <vector>
12
+
13
+ C10_CLANG_DIAGNOSTIC_PUSH()
14
+ #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
15
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
16
+ #endif
17
+
18
+ namespace torch::jit {
19
+
20
+ // GraphExecutor creates specializations of Graphs for different
21
+ // dimensionalitities and types of inputs.
22
+
23
+ struct ArgumentInfo {
24
+ friend struct ArgumentSpec;
25
+ using plain_data_type = uint64_t;
26
+
27
+ bool defined() const {
28
+ return defined_;
29
+ }
30
+ at::Device device() const {
31
+ return at::Device(DeviceType(dev_type_), device_);
32
+ }
33
+ // XXX: It is guaranteed that this will return false when called on non-tensor
34
+ // arguments
35
+ bool requires_grad() const {
36
+ return requires_grad_;
37
+ }
38
+ int dim() const {
39
+ // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
40
+ return dim_;
41
+ }
42
+ at::ScalarType type() const {
43
+ return at::ScalarType(type_);
44
+ }
45
+ TypePtr toType() const {
46
+ if (!defined())
47
+ return TensorType::get();
48
+
49
+ return TensorType::create(
50
+ type(), device(), c10::optional<size_t>(dim()), requires_grad());
51
+ }
52
+ operator TypePtr() const {
53
+ return toType();
54
+ }
55
+
56
+ private:
57
+ unsigned defined_ : 1;
58
+ unsigned requires_grad_ : 1;
59
+ unsigned : 5;
60
+ unsigned dim_ : 8;
61
+ unsigned device_ : 8;
62
+ unsigned type_ : 8;
63
+ unsigned dev_type_ : 16;
64
+ unsigned : 16;
65
+ };
66
+
67
+ static_assert(
68
+ std::is_standard_layout<ArgumentInfo>::value,
69
+ "ArgumentInfo is to be a POD struct");
70
+ static_assert(
71
+ sizeof(ArgumentInfo) == sizeof(ArgumentInfo::plain_data_type),
72
+ "ArgumentInfo is expected to be a 32-bit struct");
73
+
74
+ struct ArgumentSpec {
75
+ ArgumentSpec(size_t num_flat_tensor_inputs, size_t num_flat_optional_inputs)
76
+ : hash_code(c10::hash_combine(
77
+ num_flat_tensor_inputs,
78
+ num_flat_optional_inputs)) {
79
+ tensor_args.reserve(num_flat_tensor_inputs);
80
+ optional_presence.reserve(num_flat_optional_inputs);
81
+ }
82
+
83
+ void addOptional(const IValue& input) {
84
+ bool is_present = !input.isNone();
85
+ optional_presence.push_back(is_present);
86
+ hash_code = c10::hash_combine(hash_code, is_present);
87
+ }
88
+
89
+ void addTensor(const IValue& input, bool with_grad) {
90
+ AT_ASSERT(input.isTensor(), "Expected Tensor but found ", input.tagKind());
91
+ tensor_args.emplace_back();
92
+ auto& arg = tensor_args.back();
93
+ // Initialize all fields to 0. This is convenient, because e.g.
94
+ // requires_grad() can be checked even on tensors AND will make
95
+ // padding bits all 0s.
96
+ std::memset(&arg, 0, sizeof(ArgumentInfo));
97
+
98
+ // [argspec refcounting] reinterpret the IValue to avoid having to refcount
99
+ // the Tensor microbenchmarks
100
+ // https://github.com/zdevito/pytorch/commit/21e7200a0a0fc456bea2f10e95b1781f83933d10
101
+ // show overhead in extra refcounting along this path
102
+ const at::Tensor* t = reinterpret_cast<const at::Tensor*>(&input);
103
+ arg.defined_ = t->defined();
104
+ if (arg.defined_) {
105
+ arg.requires_grad_ = with_grad && autograd::Variable(*t).requires_grad();
106
+ arg.dim_ = t->dim();
107
+ // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
108
+ at::Device device = t->device();
109
+ arg.dev_type_ =
110
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
111
+ static_cast<std::underlying_type<DeviceType>::type>(device.type());
112
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
113
+ arg.device_ = device.index();
114
+ arg.type_ = static_cast<unsigned>(t->scalar_type());
115
+ }
116
+ combineHash(arg);
117
+ }
118
+
119
+ void combineHash(const ArgumentInfo& arg) {
120
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
121
+ ArgumentInfo::plain_data_type arg_data;
122
+ std::memcpy(&arg_data, &arg, sizeof(ArgumentInfo));
123
+ hash_code = c10::hash_combine(hash_code, arg_data);
124
+ }
125
+
126
+ // equality is fast: check ninputs, and then check the raw array data,
127
+ // there are no size/stride indirections
128
+ // hopefully std::vector<bool> has fast equality
129
+ bool operator==(const ArgumentSpec& spec) const {
130
+ if (optional_presence != spec.optional_presence) {
131
+ return false;
132
+ }
133
+ if (tensor_args.size() != spec.tensor_args.size())
134
+ return false;
135
+ // NB: we need to break out early when there are no elements, because
136
+ // passing a nullptr to memcmp is UB.
137
+ if (tensor_args.empty())
138
+ return true;
139
+ return std::memcmp(
140
+ tensor_args.data(),
141
+ spec.tensor_args.data(),
142
+ tensor_args.size() * sizeof(ArgumentInfo)) == 0;
143
+ }
144
+ bool operator!=(const ArgumentSpec& spec) const {
145
+ return !(*this == spec);
146
+ }
147
+ size_t numTensors() const {
148
+ return tensor_args.size();
149
+ }
150
+ const ArgumentInfo& tensorAt(size_t i) const {
151
+ return tensor_args[i];
152
+ }
153
+ size_t numOptionals() const {
154
+ return optional_presence.size();
155
+ }
156
+ bool isPresent(size_t i) const {
157
+ return optional_presence[i];
158
+ }
159
+ size_t hashCode() const {
160
+ return hash_code;
161
+ }
162
+
163
+ private:
164
+ size_t hash_code; // precomputed on construction
165
+ std::vector<ArgumentInfo> tensor_args;
166
+ std::vector<bool> optional_presence;
167
+ };
168
+
169
+ namespace {
170
+ static constexpr size_t ARG_SPEC_DEPTH_LIMIT = 128;
171
+ }
172
+
173
+ // ArgumentSpecCreator takes an initial graph and comes up with a set
174
+ // of simple instructions to compute the ArgumentSpec given a set of
175
+ // input tensors.
176
+ struct TORCH_API ArgumentSpecCreator {
177
+ // instructs acts on a stack of a list of input IValues
178
+ // at the beginning the stack contains a single list of the inputs to the
179
+ // function the ENTER_ instructs descend into subobjects and push new lists
180
+ // onto the stack
181
+ enum Inst : char {
182
+ ENTER_TUPLE, // consume a tuple ivalue from the top-most list, and push the
183
+ // list of its elements onto the stack as a new list
184
+ ENTER_OBJECT, // same as ENTER_TUPLE, but the input is a class
185
+ LEAVE, // pop the top-most list from the stack
186
+ SKIP, // consume an element from the top-most list, and discard
187
+ SPECIALIZE_OPTIONAL_TENSOR, // consume a optional tensor for the top-most
188
+ // list, and add it to the ArgSpec key being
189
+ // created
190
+ SPECIALIZE_TENSOR, // consume a tensor for the top-most
191
+ // list, and add it to the ArgSpec key being created
192
+ SPECIALIZE_OPTIONAL,
193
+ // consume a nontensor optional from the top-most list,
194
+ // and add it to the ArgSpec key being created
195
+ };
196
+ ArgumentSpecCreator(Graph& graph);
197
+ ArgumentSpec create(bool with_grad, const Stack& stack) const;
198
+ void specializeTypes(Graph& g, const ArgumentSpec& spec) const;
199
+ void dump() const;
200
+ using WrittenSlots = std::unordered_set<std::string>;
201
+
202
+ private:
203
+ void scan(
204
+ const TypePtr& typ,
205
+ size_t depth,
206
+ const WrittenSlots& written_slots);
207
+ size_t num_inputs_;
208
+ size_t num_tensors_ = 0;
209
+ size_t num_optionals_ = 0;
210
+ std::vector<Inst> instructions_;
211
+ };
212
+
213
+ // CompleteArgumentSpec represents one particular specialization.
214
+ // It is designed so that it can be created, hashed, and compared quickly
215
+ // since it is used along the hot-path of the JIT to check if the code
216
+ // we have created is valid for the given inputs.
217
+
218
+ // COmpleteArgumentInfoPOD is only used internally in CompleteArgumentSpec
219
+ // API users should use ArgumentInfo
220
+ struct CompleteArgumentInfoPOD {
221
+ // total size is 64-bit
222
+ unsigned is_tensor : 8; // all other fields are invalid if this is false
223
+ unsigned type : 8; // scalar type
224
+ unsigned defined : 1;
225
+ unsigned requires_grad : 1;
226
+ signed device : 14;
227
+ unsigned dev_type : 16;
228
+ unsigned
229
+ total_dims : 16; // all TensorInfoPODs are in CompleteArgumentSpec's
230
+ // tensor_info() array. total_dims is the total number of
231
+ // dimensions seen so far in all previous members of
232
+ // tensor_info(), including this tensor 2*total_dims
233
+ // becomes the offset into the sizes_strides list for the
234
+ // _next_ tensor in the tensor_info array for tensor 0,
235
+ // the offset is always 0
236
+ };
237
+
238
+ static_assert(
239
+ sizeof(CompleteArgumentInfoPOD) == sizeof(int64_t),
240
+ "CompleteArgumentInfoPOD must be 64-bit struct for CompleteArgumentSpec encoding to work");
241
+
242
+ struct CompleteArgumentInfo;
243
+
244
+ struct CompleteArgumentSpec {
245
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
246
+ CompleteArgumentSpec(bool with_grad, at::ArrayRef<IValue> inputs)
247
+ : hash_code(0), ninputs(inputs.size()) {
248
+ int32_t all_dims = 0;
249
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
250
+ const int32_t num_inputs = inputs.size();
251
+ for (const auto i : c10::irange(num_inputs)) {
252
+ if (!inputs[i].isTensor())
253
+ continue;
254
+ auto& tensor = inputs[i].toTensor();
255
+ all_dims += tensor.defined() ? tensor.ndimension() : 0;
256
+ }
257
+ // allocate enough room for all TensorPODs and dimensions
258
+ data.resize(ninputs + all_dims * 2);
259
+
260
+ // and reinterpret our data array as these structs
261
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
262
+ auto* pods = reinterpret_cast<CompleteArgumentInfoPOD*>(data.data());
263
+ int64_t* next_dim = sizes_strides();
264
+ int32_t total_dims = 0;
265
+ for (const auto i : c10::irange(num_inputs)) {
266
+ auto& pod = pods[i];
267
+ pod.is_tensor = static_cast<uint32_t>(inputs[i].isTensor());
268
+ if (pod.is_tensor) {
269
+ at::Tensor t = inputs[i].toTensor();
270
+ pod.defined = t.defined();
271
+ if (pod.defined) {
272
+ pod.type = static_cast<int>(t.scalar_type());
273
+ // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
274
+ at::Device device = t.device();
275
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
276
+ pod.dev_type = static_cast<std::underlying_type<DeviceType>::type>(
277
+ device.type());
278
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
279
+ pod.device = device.index();
280
+ pod.requires_grad = with_grad && t.requires_grad();
281
+ total_dims += t.ndimension();
282
+ auto sizes = t.sizes();
283
+ std::copy(sizes.begin(), sizes.end(), next_dim);
284
+ next_dim += sizes.size();
285
+ auto strides = t.strides();
286
+ std::copy(strides.begin(), strides.end(), next_dim);
287
+ next_dim += strides.size();
288
+ }
289
+ }
290
+ // each POD has a running tally of all dimensions including its own
291
+ TORCH_CHECK(
292
+ total_dims < std::numeric_limits<uint16_t>::max(),
293
+ "The number of dims cannot be packed into CompleteArgumentSpec:",
294
+ total_dims);
295
+ pod.total_dims = total_dims;
296
+ }
297
+ // we precompute the hash_code to minimize the time inside of hash
298
+ // table operations where we may need to hold a compiler cache lock.
299
+ hash_code = c10::hash_combine(0, ninputs);
300
+ for (auto d : data) {
301
+ hash_code = c10::hash_combine(hash_code, d);
302
+ }
303
+ }
304
+
305
+ // equality is fast: check ninputs, and then check the raw array data,
306
+ // there are no size/stride indirections
307
+ bool operator==(const CompleteArgumentSpec& spec) const {
308
+ return ninputs == spec.ninputs && data == spec.data;
309
+ }
310
+ bool operator!=(const CompleteArgumentSpec& spec) const {
311
+ return !(*this == spec);
312
+ }
313
+ friend struct CompleteArgumentInfo;
314
+ CompleteArgumentInfo at(size_t i) const;
315
+ size_t size() const {
316
+ return ninputs;
317
+ }
318
+ size_t hashCode() const {
319
+ return hash_code;
320
+ }
321
+
322
+ private:
323
+ ArrayRef<CompleteArgumentInfoPOD> tensor_info() const {
324
+ return ArrayRef<CompleteArgumentInfoPOD>(
325
+ reinterpret_cast<const CompleteArgumentInfoPOD*>(data.data()), ninputs);
326
+ }
327
+ // the start of the sizes_strides information, which comes after the
328
+ // CompleteArgumentInfoPOD list.
329
+ const int64_t* sizes_strides() const {
330
+ return data.data() + ninputs;
331
+ }
332
+ int64_t* sizes_strides() {
333
+ return data.data() + ninputs;
334
+ }
335
+ size_t hash_code; // precomputed on construction
336
+ size_t ninputs;
337
+ // layout is ninputs of TensorPOD (each 64-bit) followed by their size and
338
+ // stride info for 3 tensors:
339
+ // [t0POD][t1POD][t2POD]...
340
+ // [t0 sizes][t0 strides][t1 sizes][t1 strides][t2 sizes][t2 strides]
341
+ std::vector<int64_t> data;
342
+ };
343
+
344
+ // public view of compressed CompleteArgumentInfo
345
+ struct CompleteArgumentInfo {
346
+ CompleteArgumentInfo(const CompleteArgumentSpec& spec, const int i)
347
+ : spec(spec), i(i) {}
348
+ bool isTensor() const {
349
+ return pod(i).is_tensor;
350
+ }
351
+ at::ScalarType type() const {
352
+ return at::ScalarType(pod(i).type);
353
+ }
354
+ bool defined() const {
355
+ return pod(i).defined;
356
+ }
357
+ bool requires_grad() const {
358
+ return pod(i).requires_grad;
359
+ }
360
+ at::Device device() const {
361
+ return at::Device(
362
+ DeviceType(pod(i).dev_type),
363
+ static_cast<c10::DeviceIndex>(pod(i).device));
364
+ }
365
+ int ndimension() const {
366
+ // See [valid range], it is always valid to ask for offset for (i + 1)
367
+ return (sizes_strides_offset(i + 1) - sizes_strides_offset(i)) / 2;
368
+ }
369
+ at::IntArrayRef sizes() const {
370
+ return at::IntArrayRef(
371
+ spec.sizes_strides() + sizes_strides_offset(i), ndimension());
372
+ }
373
+ at::IntArrayRef strides() const {
374
+ int ndim = ndimension();
375
+ return at::IntArrayRef(
376
+ spec.sizes_strides() + sizes_strides_offset(i) + ndim, ndim);
377
+ }
378
+ operator TypePtr() const {
379
+ if (!defined())
380
+ return TensorType::get();
381
+ return TensorType::create(
382
+ type(),
383
+ device(),
384
+ c10::VaryingShape<int64_t>{sizes()},
385
+ c10::VaryingShape<int64_t>{strides()},
386
+ requires_grad());
387
+ }
388
+
389
+ private:
390
+ // offsetinto sizes_strides() array where the sizes start for tensor j
391
+ // [valid range] valid range is [0, ninputs]
392
+ // (i.e. you can ask for the offset at ninputs, which would be the offset of
393
+ // the next tensor if it existed)
394
+ int sizes_strides_offset(int j) const {
395
+ if (j == 0)
396
+ return 0;
397
+ // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
398
+ return 2 * pod(j - 1).total_dims;
399
+ }
400
+ const CompleteArgumentInfoPOD& pod(int j) const {
401
+ return spec.tensor_info().at(j);
402
+ }
403
+ const CompleteArgumentSpec& spec;
404
+ const int i;
405
+ };
406
+
407
+ inline std::ostream& operator<<(std::ostream& out, const ArgumentInfo& info) {
408
+ if (!info.defined()) {
409
+ return out << "<undefined>";
410
+ }
411
+ out << "Tensor(device=" << info.device() << ", type=" << toString(info.type())
412
+ << ", requires_grad=" << info.requires_grad() << ", dims=" << info.dim()
413
+ << ")";
414
+ return out;
415
+ }
416
+
417
+ inline std::ostream& operator<<(std::ostream& out, const ArgumentSpec& spec) {
418
+ out << "{";
419
+ for (const auto i : c10::irange(spec.numTensors())) {
420
+ if (i > 0)
421
+ out << ", ";
422
+ out << spec.tensorAt(i);
423
+ }
424
+ out << "; ";
425
+ for (const auto i : c10::irange(spec.numOptionals())) {
426
+ if (i > 0)
427
+ out << ", ";
428
+ out << spec.isPresent(i);
429
+ }
430
+ out << "}";
431
+ return out;
432
+ }
433
+
434
+ inline std::ostream& operator<<(
435
+ std::ostream& out,
436
+ const CompleteArgumentInfo& info) {
437
+ if (!info.defined()) {
438
+ return out << "<undefined>";
439
+ }
440
+ out << "Tensor(device=" << info.device() << ", type=" << toString(info.type())
441
+ << ", requires_grad=" << info.requires_grad()
442
+ << ", sizes=" << info.sizes() << ", strides=" << info.strides() << ")";
443
+ return out;
444
+ }
445
+
446
+ inline std::ostream& operator<<(
447
+ std::ostream& out,
448
+ const CompleteArgumentSpec& spec) {
449
+ out << "{";
450
+ for (const auto i : c10::irange(spec.size())) {
451
+ if (i > 0)
452
+ out << ", ";
453
+ out << spec.at(i);
454
+ }
455
+ out << "}";
456
+ return out;
457
+ }
458
+
459
+ inline CompleteArgumentInfo CompleteArgumentSpec::at(size_t i) const {
460
+ return CompleteArgumentInfo(*this, i);
461
+ }
462
+
463
+ inline c10::optional<int8_t> convertOptional(
464
+ c10::optional<c10::ScalarType> const& from) {
465
+ return (from) ? c10::optional<int8_t>(static_cast<int8_t>(*from))
466
+ : c10::optional<int8_t>{};
467
+ }
468
+
469
+ } // namespace torch::jit
470
+
471
+ namespace std {
472
+
473
+ template <typename T>
474
+ struct hash<c10::VaryingShape<T>> {
475
+ size_t operator()(const c10::VaryingShape<T>& vs) const {
476
+ return c10::get_hash(
477
+ vs.size(),
478
+ vs.size() ? vs.sizes().value() : std::vector<c10::optional<T>>());
479
+ }
480
+ };
481
+
482
+ template <>
483
+ struct hash<c10::TensorType> {
484
+ size_t operator()(const c10::TensorType& ptt) const {
485
+ return c10::get_hash<
486
+ c10::optional<int8_t>,
487
+ c10::VaryingShape<int64_t>,
488
+ c10::VaryingShape<int64_t>,
489
+ c10::optional<bool>>(
490
+ torch::jit::convertOptional(ptt.scalarType()),
491
+ ptt.sizes(),
492
+ ptt.strides(),
493
+ ptt.requiresGrad());
494
+ }
495
+ };
496
+
497
+ template <>
498
+ struct hash<torch::jit::ArgumentSpec> {
499
+ size_t operator()(const torch::jit::ArgumentSpec& spec) const {
500
+ return spec.hashCode();
501
+ }
502
+ };
503
+ template <>
504
+ struct hash<torch::jit::CompleteArgumentSpec> {
505
+ size_t operator()(const torch::jit::CompleteArgumentSpec& spec) const {
506
+ return spec.hashCode();
507
+ }
508
+ };
509
+ } // namespace std
510
+
511
+ C10_CLANG_DIAGNOSTIC_POP()
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry_util.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch::jit {
7
+
8
+ TORCH_API const std::string& GetSerializedDecompositions();
9
+
10
+ TORCH_API const OperatorMap<std::string>& GetDecompositionMapping();
11
+
12
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/instruction.h ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <typeinfo>
5
+ #include <unordered_set>
6
+
7
+ namespace torch::jit {
8
+ // instruction look like:
9
+ // op_code X, N
10
+ // meaning of X, N depend on the op:
11
+ // O - index into operator table
12
+ // R - index into register table
13
+ // I - literal integer
14
+ // C - index into constant table
15
+ // P - jump offset relative to beginning of current instruction
16
+ // F - index into function table
17
+ // T - index into the type table, used for guard instructions
18
+ // S - index into object slots
19
+ // C - index into code table
20
+
21
+ #define FORALL_OPCODES(_) \
22
+ _(OP, "O") /* invoke operator X */ \
23
+ _(OPN, "OI") /* invoke vararg operator X with N arguments */ \
24
+ _(LOAD, "R") /* push a value from a register X */ \
25
+ _(MOVE, "R") /* push a value from register X, clearing the register */ \
26
+ _(STOREN, "RI") /* store N values to registers [X, X+N) */ \
27
+ _(STORE, "R") /* store 1 value to registers X */ \
28
+ _(DROP, "") /* drop 1 value from the top of the stack */ \
29
+ _(DROPR, "R") /* clear register X */ \
30
+ _(LOADC, "C") /* push the constant X */ \
31
+ _(JF, "P") /* pop the top of the stack, if false, branch to P */ \
32
+ _(JMP, "P") /* unconditional branch to X */ \
33
+ _(LOOP, "PI") /* perform a loop, X is where to branch if cond is false */ \
34
+ _(RET, "") /* exit execution */ \
35
+ _(WAIT, "") /* wait for a future to be complete */ \
36
+ _(CALL, "F") /* call function X */ \
37
+ _(GUARD, "T") /* check a guard against type_table, true if passes */ \
38
+ _(TYPECHECK, "TN") /* check each type of input[i] against type_table[X+N] */ \
39
+ _(FAIL_GUARD, "T") /* fail a guard, patch back to GUARD */ \
40
+ _(PROFILE_OP, "F") /* get a callback from profile_function_table at X */ \
41
+ _(TAIL_CALL, "F") /* replace current frame with function F */ \
42
+ _(INTERFACE_CALL, "CI") /* call method X on the first argument (of N) */ \
43
+ _(GET_ATTR, "S") /* get attribute from slot X in an Object */ \
44
+ _(SET_ATTR, "S") /* set attribute to slot X in an Object */ \
45
+ _(LIST_UNPACK, "I") /* unpack list expecting length I */ \
46
+ _(TUPLE_CONSTRUCT, "I") /* construct a tuple using X inputs */ \
47
+ _(NAMED_TUPLE_CONSTRUCT, \
48
+ "TI") /* construct a tuple of type X, using N inputs */ \
49
+ _(LIST_CONSTRUCT, "TI") /* construct a list of type X, using N inputs */ \
50
+ _(DICT_CONSTRUCT, "TI") /* construct a dict of type X, using N inputs */ \
51
+ _(CREATE_OBJECT, "T") /* create an object of type X */ \
52
+ _(ISINSTANCE, "TI") /* check object is one of types[X:X+N] */ \
53
+ _(TUPLE_SLICE, "II") /* slice tup[X:(X+N)] */ \
54
+ _(TUPLE_INDEX, "") /* get the value from a tuple at that index */ \
55
+ _(RAISE_EXCEPTION, "") /* throws the exception from Python */ \
56
+ _(DICT_INDEX, "") /* gets the value from the dict for given key */ \
57
+ _(UNCHECKED_CAST, "") /* perform an unchecked cast operation */ \
58
+ _(__IS__, "") /* performs `is` operator from Python */ \
59
+ _(UN_INITIALIZED, \
60
+ "") /* sets default values to variables that are uninitialized */ \
61
+ _(__ISNOT__, "") /* performs `is not` operator from Python */ \
62
+ _(FORMAT, "I") /* performs string format function `f strings` or `{}.format` \
63
+ the number of inputs in stored in X */ \
64
+ _(DEVICE, "") /* invokes aten::device for a Tensor */ \
65
+ _(DTYPE, "") /* invokes aten::dtype for a Tensor */ \
66
+ _(DIM, "") /* invokes aten::dim for a Tensor */ \
67
+ _(__NOT__, "") /* performs `not` operator from Python */ \
68
+ _(TO_LIST, "") /* convert the input to a list */ \
69
+ _(NUM_TO_TENSOR, \
70
+ "") /* performs the conversion of a number/scalar to Tensor */ \
71
+ _(IS_CUDA, "") /* invokes aten::is_cuda for a Tensor */ \
72
+ _(FORK, "CN") /* launch a thread to run code entry x with N inputs */ \
73
+ _(WARN, "I") /* emit a warning with line information */ \
74
+ _(ENTER, "EN") /* enter scope of a contextmanager */ \
75
+ _(EXIT, "EX") /* exit the last entered contextmanager */ \
76
+ _(AWAITABLE, "CN") /* initialize await for code entry x with N inputs */
77
+
78
+ enum OpCode : uint8_t {
79
+ #define DEFINE_OP(op, _) op,
80
+ FORALL_OPCODES(DEFINE_OP)
81
+ #undef DEFINE_OP
82
+ };
83
+
84
+ struct Instruction {
85
+ OpCode op;
86
+ uint8_t unused;
87
+ uint16_t N;
88
+ int32_t X;
89
+ // TODO: check for overflow
90
+ Instruction(OpCode op, int32_t X, uint16_t N)
91
+ : op(op), unused(0), N(N), X(X) {}
92
+ };
93
+ std::ostream& operator<<(std::ostream& out, Instruction inst);
94
+
95
+ bool isOpSupportedInMobile(OpCode op);
96
+ char const* toString(OpCode op);
97
+ OpCode parseOpCode(const char* str);
98
+ std::ostream& operator<<(std::ostream& out, Instruction inst);
99
+
100
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_trace.h ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ #include <torch/csrc/jit/ir/ir.h>
2
+ #include <memory>
3
+
4
+ namespace torch::jit {
5
+ TORCH_API std::shared_ptr<Graph> TraceGraph(
6
+ std::shared_ptr<Graph> graph,
7
+ Stack& stack);
8
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/register_ops_utils.h ADDED
@@ -0,0 +1,885 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/Context.h>
4
+ #include <c10/core/DeviceType.h>
5
+ #include <torch/csrc/autograd/autograd.h>
6
+ #include <torch/csrc/autograd/edge.h>
7
+ #include <torch/csrc/autograd/function.h>
8
+ #include <torch/csrc/autograd/generated/variable_factories.h>
9
+ #include <torch/csrc/autograd/variable.h>
10
+ #include <torch/csrc/jit/api/compilation_unit.h>
11
+ #include <torch/csrc/jit/api/module.h>
12
+ #include <torch/csrc/jit/frontend/error_report.h>
13
+ #include <torch/csrc/jit/ir/ir.h>
14
+ #include <torch/csrc/jit/mobile/register_ops_common_utils.h>
15
+ #include <torch/csrc/jit/runtime/custom_operator.h>
16
+ #include <torch/csrc/jit/runtime/graph_executor.h>
17
+ #include <torch/csrc/jit/runtime/jit_exception.h>
18
+ #include <torch/csrc/jit/runtime/logging.h>
19
+ #include <torch/csrc/jit/runtime/operator.h>
20
+ #include <torch/csrc/jit/runtime/print_handler.h>
21
+ #include <torch/csrc/jit/runtime/profiling_record.h>
22
+ #include <torch/csrc/jit/runtime/vararg_functions.h>
23
+ #include <torch/csrc/jit/serialization/pickle.h>
24
+
25
+ #include <ATen/ExpandUtils.h>
26
+ #include <ATen/Parallel.h>
27
+ #include <ATen/WrapDimUtils.h>
28
+ #include <ATen/core/Dict.h>
29
+ #include <ATen/core/Generator.h>
30
+ #include <ATen/core/ivalue.h>
31
+ #include <c10/core/Device.h>
32
+ #include <c10/core/thread_pool.h>
33
+ #include <c10/util/SmallVector.h>
34
+ #include <c10/util/irange.h>
35
+ #include <c10/util/math_compat.h>
36
+ #include <c10/util/string_utils.h>
37
+
38
+ namespace torch::jit {
39
+ constexpr inline c10::AliasAnalysisKind aliasAnalysisFromSchema() {
40
+ return c10::AliasAnalysisKind::FROM_SCHEMA;
41
+ }
42
+
43
+ constexpr inline c10::AliasAnalysisKind aliasAnalysisConservative() {
44
+ return c10::AliasAnalysisKind::CONSERVATIVE;
45
+ }
46
+
47
+ constexpr inline c10::AliasAnalysisKind aliasAnalysisSpecialCase() {
48
+ return c10::AliasAnalysisKind::INTERNAL_SPECIAL_CASE;
49
+ }
50
+
51
+ template <class T>
52
+ c10::List<T> make_result_list(const TypePtr& elemType) {
53
+ return c10::List<T>();
54
+ }
55
+
56
+ template <>
57
+ c10::impl::GenericList make_result_list<IValue>(const TypePtr& elemType);
58
+
59
+ // As described in https://docs.python.org/3/library/functions.html#round
60
+ // When a number is exactly halfway between two integers, python builtin round
61
+ // function will round to even number. We use round(x/2)*2 to handle the
62
+ // special halfway case. For positive 'x', round(x/2)*2 =
63
+ // round((x_e + x_r)/2)*2 = x_e + round(x_r/2)*2, where x_e is an even integer,
64
+ // x_r is either 0.5 of 1.5, round(x_r/2)*2 results a 0 or 2, so the final
65
+ // result will always be a even number. Due to symmetricity, it also applies to
66
+ // negative cases.
67
+ inline double round_to_even(double a) {
68
+ return a - std::floor(a) == 0.5 ? (std::round(a * 0.5) * 2.0) : std::round(a);
69
+ }
70
+
71
+ // using the rules from python_arg_parser FunctionParameter::check
72
+ // tensor cannot have grad set, tensor must be 0 dim,
73
+ // and if the dest is an int the source must be integral type
74
+ void checkImplicitTensorToNum(const at::Tensor& t, bool toInt);
75
+
76
+ static C10_UNUSED int64_t floordiv(int64_t a, int64_t b) {
77
+ if (b == 0) {
78
+ throw std::runtime_error("division by 0");
79
+ }
80
+ if ((a > 0) == (b > 0)) {
81
+ // simple case, both have same sign
82
+ return a / b;
83
+ } else {
84
+ // in python division rounds down, it doesn't not truncate like in c++
85
+ auto r = lldiv(a, b);
86
+ return (r.rem) ? r.quot - 1 : r.quot;
87
+ }
88
+ }
89
+ TORCH_API void checkDoubleInRange(double a);
90
+ static C10_UNUSED int64_t floor(double a) {
91
+ checkDoubleInRange(a);
92
+ return std::floor(a);
93
+ }
94
+ static C10_UNUSED int64_t ceil(double a) {
95
+ checkDoubleInRange(a);
96
+ return std::ceil(a);
97
+ }
98
+
99
+ static C10_UNUSED int64_t gcd(int64_t a, int64_t b) {
100
+ while (b != 0) {
101
+ int64_t r = a % b;
102
+ a = b;
103
+ b = r;
104
+ }
105
+ // in python gcd returns non-negative values
106
+ return std::abs(a);
107
+ }
108
+
109
+ int64_t partProduct(int n, int m);
110
+
111
+ void loop(int n, int64_t& p, int64_t& r);
112
+
113
+ int nminussumofbits(int v);
114
+
115
+ int64_t factorial(int n);
116
+ static const double degToRad = std::acos(-1.0) / 180.0;
117
+ static const double radToDeg = 180.0 / std::acos(-1.0);
118
+ double degrees(double x);
119
+ double radians(double x);
120
+
121
+ // Convert an python index (which may be negative) into an index usable for a
122
+ // C++ container
123
+
124
+ // Equivalent to list.at(idx)
125
+ template <typename T>
126
+ T getItem(const c10::List<T>& list, int64_t idx) {
127
+ const int64_t list_size = list.size();
128
+ const int64_t normalized_idx = normalizeIndex(idx, list_size);
129
+ if (normalized_idx < 0 || normalized_idx >= list_size) {
130
+ throw std::out_of_range("list index out of range");
131
+ }
132
+ return list.get(normalized_idx);
133
+ }
134
+
135
+ template <typename T>
136
+ void setItem(const c10::List<T>& list, int64_t idx, T&& value) {
137
+ const int64_t list_size = list.size();
138
+ const int64_t normalized_idx = normalizeIndex(idx, list_size);
139
+ if (normalized_idx < 0 || normalized_idx >= list_size) {
140
+ throw std::out_of_range("list index out of range");
141
+ }
142
+ list.set(normalized_idx, std::forward<T>(value));
143
+ }
144
+
145
+ void listAppend(Stack& stack);
146
+
147
+ void listReverse(Stack& stack);
148
+
149
+ template <typename T>
150
+ void minList(Stack& stack) {
151
+ c10::List<T> a = pop(stack).to<c10::List<T>>();
152
+ c10::List<T> b = pop(stack).to<c10::List<T>>();
153
+
154
+ size_t min_size = std::min(a.size(), b.size());
155
+ for (const auto i : c10::irange(min_size)) {
156
+ if (a[i] == b[i]) {
157
+ continue;
158
+ }
159
+
160
+ push(stack, a[i] < b[i] ? a : b);
161
+ return;
162
+ }
163
+
164
+ push(stack, b.size() < a.size() ? b : a);
165
+ }
166
+
167
+ template <typename T>
168
+ void maxList(Stack& stack) {
169
+ c10::List<T> a = pop(stack).to<c10::List<T>>();
170
+ c10::List<T> b = pop(stack).to<c10::List<T>>();
171
+
172
+ size_t min_size = std::min(a.size(), b.size());
173
+ for (const auto i : c10::irange(min_size)) {
174
+ if (a[i] == b[i]) {
175
+ continue;
176
+ }
177
+
178
+ push(stack, a[i] > b[i] ? a : b);
179
+ return;
180
+ }
181
+
182
+ push(stack, b.size() > a.size() ? b : a);
183
+ }
184
+
185
+ void listPopImpl(Stack& stack, const char* empty_message);
186
+
187
+ void listPop(Stack& stack);
188
+
189
+ void listClear(Stack& stack);
190
+
191
+ void listDelete(Stack& stack);
192
+
193
+ void listInsert(Stack& stack);
194
+
195
+ template <typename T>
196
+ void listRemove(Stack& stack) {
197
+ T elem = pop(stack).to<T>();
198
+ c10::List<T> list = pop(stack).to<c10::List<T>>();
199
+
200
+ auto pos = std::find(list.begin(), list.end(), elem);
201
+
202
+ if (pos != list.end()) {
203
+ list.erase(pos);
204
+ } else {
205
+ AT_ERROR("list.remove(x): x not in list");
206
+ }
207
+ }
208
+
209
+ template <typename T>
210
+ void listMin(Stack& stack) {
211
+ c10::List<T> list = pop(stack).to<c10::List<T>>();
212
+ size_t list_size = list.size();
213
+ if (list_size == 0) {
214
+ throw std::runtime_error("min() arg is an empty sequence");
215
+ }
216
+
217
+ T min_elem = list[0];
218
+ for (const auto i : c10::irange(1, list_size)) {
219
+ T elem = list[i];
220
+ min_elem = elem < min_elem ? elem : min_elem;
221
+ }
222
+
223
+ stack.push_back(min_elem);
224
+ }
225
+
226
+ template <typename T>
227
+ void listMax(Stack& stack) {
228
+ c10::List<T> list = pop(stack).to<c10::List<T>>();
229
+ size_t list_size = list.size();
230
+ if (list_size == 0) {
231
+ throw std::runtime_error("max() arg is an empty sequence");
232
+ }
233
+
234
+ T max_elem = list[0];
235
+ for (const auto i : c10::irange(1, list_size)) {
236
+ T elem = list[i];
237
+ max_elem = elem > max_elem ? elem : max_elem;
238
+ }
239
+
240
+ stack.push_back(max_elem);
241
+ }
242
+
243
+ template <>
244
+ void listRemove<at::Tensor>(Stack& stack);
245
+
246
+ template <typename T>
247
+ void listIndex(Stack& stack) {
248
+ T elem = pop(stack).to<T>();
249
+ c10::List<T> list = pop(stack).to<c10::List<T>>();
250
+
251
+ auto pos = std::find(list.begin(), list.end(), elem);
252
+
253
+ if (pos != list.end()) {
254
+ push(stack, static_cast<int64_t>(std::distance(list.begin(), pos)));
255
+ } else {
256
+ AT_ERROR("'", elem, "' is not in list");
257
+ }
258
+ }
259
+
260
+ template <>
261
+ void listIndex<at::Tensor>(Stack& stack);
262
+
263
+ template <typename T>
264
+ void listCount(Stack& stack) {
265
+ T elem = pop(stack).to<T>();
266
+ c10::List<T> list = pop(stack).to<c10::List<T>>();
267
+
268
+ const int64_t count = std::count(list.begin(), list.end(), elem);
269
+ push(stack, count);
270
+ }
271
+
272
+ template <>
273
+ void listCount<at::Tensor>(Stack& stack);
274
+
275
+ void listExtend(Stack& stack);
276
+
277
+ void listCopy(Stack& stack);
278
+
279
+ void listSelect(Stack& stack);
280
+
281
+ void listLen(Stack& stack);
282
+
283
+ template <typename T>
284
+ void listEq(Stack& stack) {
285
+ c10::List<T> b = pop(stack).to<c10::List<T>>();
286
+ c10::List<T> a = pop(stack).to<c10::List<T>>();
287
+ push(stack, a == b);
288
+ }
289
+
290
+ template <typename T>
291
+ void listNe(Stack& stack) {
292
+ c10::List<T> b = pop(stack).to<c10::List<T>>();
293
+ c10::List<T> a = pop(stack).to<c10::List<T>>();
294
+ push(stack, a != b);
295
+ }
296
+
297
+ inline bool tensor_list_equal(
298
+ const c10::List<at::Tensor>& a,
299
+ const c10::List<at::Tensor>& b) {
300
+ if (a.size() != b.size()) {
301
+ return false;
302
+ }
303
+
304
+ for (const auto i : c10::irange(a.size())) {
305
+ const at::Tensor& a_element = a[i];
306
+ const at::Tensor& b_element = b[i];
307
+ // This preserves Python's semantics, which uses eq() to compare two
308
+ // elements, then passes the result to bool().
309
+ // see: https://docs.python.org/3.4/reference/datamodel.html#object.__ge__
310
+ const auto cmp_result = a_element.eq(b_element);
311
+ if (!at::native::is_nonzero(cmp_result)) {
312
+ return false;
313
+ }
314
+ }
315
+
316
+ return true;
317
+ }
318
+
319
+ // Specialization for at::Tensor, since it doesn't define operator==
320
+ template <>
321
+ void listEq<at::Tensor>(Stack& stack);
322
+
323
+ // Specialization for at::Tensor, since it doesn't define operator==
324
+ template <>
325
+ void listNe<at::Tensor>(Stack& stack);
326
+
327
+ void listList(Stack& stack);
328
+
329
+ template <typename T>
330
+ void listContains(Stack& stack) {
331
+ auto key = pop(stack).to<T>();
332
+ auto list = pop(stack).to<c10::List<T>>();
333
+ // NOLINTNEXTLINE(performance-implicit-conversion-in-loop)
334
+ for (const T& item : list) {
335
+ if (item == key) {
336
+ push(stack, true);
337
+ return;
338
+ }
339
+ }
340
+ push(stack, false);
341
+ }
342
+
343
+ void listAdd(Stack& stack);
344
+
345
+ void listInplaceAdd(Stack& stack);
346
+
347
+ void listMulIntLeftInPlace(Stack& stack);
348
+
349
+ void listMulIntLeft(Stack& stack);
350
+
351
+ void listMulIntRight(Stack& stack);
352
+
353
+ void listSlice(Stack& stack);
354
+
355
+ template <typename T>
356
+ void listSort(Stack& stack) {
357
+ bool reverse = pop(stack).toBool();
358
+ c10::List<T> list = pop(stack).to<c10::List<T>>();
359
+ std::sort(list.begin(), list.end(), [reverse](const T& a, const T& b) {
360
+ // FBCode errors without this check - "strict weak ordering"
361
+ // TODO: remove when possible, since it just slows down
362
+ // sorting and doesn't do anything useful
363
+ if (a == b) {
364
+ return false;
365
+ }
366
+ return (a < b) != reverse;
367
+ });
368
+ }
369
+
370
+ // Specialization for at::Tensor
371
+ template <>
372
+ void listSort<at::Tensor>(Stack& stack);
373
+
374
+ template <typename T>
375
+ void listCopyAndSort(Stack& stack) {
376
+ c10::List<T> list = pop(stack).to<c10::List<T>>();
377
+ auto list_copied = list.copy();
378
+ std::sort(list_copied.begin(), list_copied.end(), [](const T& a, const T& b) {
379
+ // "strict weak ordering" issue - see other sort
380
+ if (a == b) {
381
+ return false;
382
+ }
383
+ return a < b;
384
+ });
385
+ push(stack, list_copied);
386
+ }
387
+
388
+ // Specialization for at::Tensor
389
+ template <>
390
+ void listCopyAndSort<at::Tensor>(Stack& stack);
391
+
392
+ void listSetItem(Stack& stack);
393
+
394
// Compact, constexpr-constructible record describing one JIT operator:
// the selective-build schema string, the implementation (either a direct
// operation function pointer or an OperationCreator factory), and the
// alias-analysis kind to register it with.
+ struct OperatorGeneratorArgs {
395
// Schema text produced by TORCH_SELECTIVE_SCHEMA; nullptr when the
// operator was excluded by selective build (SelectiveStr<false> ctor below).
+ const char* schema_str;
396
// True when `operationCreator` is the active union member, false when
// `operation` is.
+ bool isOperationCreator;
397
+ union {
398
+ void (*operation)(Stack&);
399
+ OperationCreator operationCreator;
400
+ };
401
+ AliasAnalysisKind aliasAnalysis;
402
+
403
// Included-in-build overload taking a direct operation pointer.
+ explicit constexpr OperatorGeneratorArgs(
404
+ torch::detail::SelectiveStr<true> schema_str,
405
+ void (*op)(Stack&),
406
+ AliasAnalysisKind aa)
407
+ : schema_str(schema_str),
408
+ isOperationCreator(false),
409
+ operation(op),
410
+ aliasAnalysis(aa) {}
411
+
412
// Included-in-build overload taking an OperationCreator factory.
+ explicit constexpr OperatorGeneratorArgs(
413
+ torch::detail::SelectiveStr<true> schema_str,
414
+ OperationCreator opCreator,
415
+ AliasAnalysisKind aa)
416
+ : schema_str(schema_str),
417
+ isOperationCreator(true),
418
+ operationCreator(opCreator),
419
+ aliasAnalysis(aa) {}
420
+
421
// Excluded-from-build overload: SelectiveStr<false> swallows whatever
// arguments the macro expansion passes and leaves an inert record
// (null schema, null operation).
+ template <typename... Args>
422
+ explicit constexpr OperatorGeneratorArgs(
423
+ torch::detail::SelectiveStr<false>,
424
+ Args...)
425
+ : schema_str(nullptr),
426
+ isOperationCreator(false),
427
+ operation(nullptr),
428
+ aliasAnalysis(AliasAnalysisKind::INTERNAL_SPECIAL_CASE) {}
429
+ };
430
+
431
+ #define DEFINE_GENERIC_BINARY_OP( \
432
+ aten_op, op, int_float_result, complex_result) \
433
+ OperatorGeneratorArgs( \
434
+ TORCH_SELECTIVE_SCHEMA(#aten_op \
435
+ ".int_int(int a, int b) -> " #int_float_result), \
436
+ [](Stack& stack) { \
437
+ int64_t a, b; \
438
+ pop(stack, a, b); \
439
+ push(stack, op); \
440
+ }, \
441
+ aliasAnalysisFromSchema()), \
442
+ OperatorGeneratorArgs( \
443
+ TORCH_SELECTIVE_SCHEMA( \
444
+ #aten_op \
445
+ ".float_float(float a, float b) -> " #int_float_result), \
446
+ [](Stack& stack) { \
447
+ double a, b; \
448
+ pop(stack, a, b); \
449
+ push(stack, op); \
450
+ }, \
451
+ aliasAnalysisFromSchema()), \
452
+ OperatorGeneratorArgs( \
453
+ TORCH_SELECTIVE_SCHEMA( \
454
+ #aten_op \
455
+ ".complex_complex(complex a, complex b) -> " #complex_result), \
456
+ [](Stack& stack) { \
457
+ c10::complex<double> a, b; \
458
+ pop(stack, a, b); \
459
+ push(stack, op); \
460
+ }, \
461
+ aliasAnalysisFromSchema())
462
+
463
+ // define implementations for primitive number ops
464
+ #define DEFINE_GENERIC_OP(aten_op, int_op, float_op, int_result, float_result) \
465
+ OperatorGeneratorArgs( \
466
+ TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result), \
467
+ [](Stack& stack) { \
468
+ int64_t a, b; \
469
+ pop(stack, a, b); \
470
+ push(stack, int_op); \
471
+ }, \
472
+ aliasAnalysisFromSchema()), \
473
+ OperatorGeneratorArgs( \
474
+ TORCH_SELECTIVE_SCHEMA( \
475
+ #aten_op ".float(float a, float b) -> " #float_result), \
476
+ [](Stack& stack) { \
477
+ double a, b; \
478
+ pop(stack, a, b); \
479
+ push(stack, float_op); \
480
+ }, \
481
+ aliasAnalysisFromSchema())
482
+
483
+ #define DEFINE_INT_FLOAT_OP(aten_op, op, result) \
484
+ OperatorGeneratorArgs( \
485
+ TORCH_SELECTIVE_SCHEMA(#aten_op \
486
+ ".int_float(int a, float b) -> " #result), \
487
+ [](Stack& stack) { \
488
+ int64_t a; \
489
+ double b; \
490
+ pop(stack, a, b); \
491
+ push(stack, op); \
492
+ }, \
493
+ aliasAnalysisFromSchema()), \
494
+ OperatorGeneratorArgs( \
495
+ TORCH_SELECTIVE_SCHEMA(#aten_op \
496
+ ".float_int(float a, int b) -> " #result), \
497
+ [](Stack& stack) { \
498
+ double a; \
499
+ int64_t b; \
500
+ pop(stack, a, b); \
501
+ push(stack, op); \
502
+ }, \
503
+ aliasAnalysisFromSchema())
504
+
505
+ #define DEFINE_INT_OP(aten_op, op) \
506
+ OperatorGeneratorArgs( \
507
+ TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> int"), \
508
+ [](Stack& stack) { \
509
+ int64_t a, b; \
510
+ pop(stack, a, b); \
511
+ push(stack, op); /* NOLINT(hicpp-signed-bitwise) */ \
512
+ }, \
513
+ aliasAnalysisFromSchema())
514
+
515
+ #define DEFINE_STR_CMP_OP(aten_op, op) \
516
+ OperatorGeneratorArgs( \
517
+ TORCH_SELECTIVE_SCHEMA(#aten_op ".str(str a, str b) -> bool"), \
518
+ [](Stack& stack) { \
519
+ auto b = pop(stack).toStringRef(); \
520
+ auto a = pop(stack).toStringRef(); \
521
+ push(stack, op); \
522
+ }, \
523
+ aliasAnalysisFromSchema())
524
+
525
+ // define a primitive op over Scalar operands.
526
+ // it's necessary to register this overload following
527
+ // int/float variations to avoid trapping Scalar args
528
+ // in unintended implicit conversions
529
+ #define DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION_GENERIC( \
530
+ aten_op, int_op, float_op, result, string_val) \
531
+ OperatorGeneratorArgs( \
532
+ TORCH_SELECTIVE_SCHEMA(#aten_op string_val \
533
+ "(Scalar a, Scalar b) -> " #result), \
534
+ [](Stack& stack) { \
535
+ IValue x, y; \
536
+ pop(stack, x, y); \
537
+ if (x.isDouble()) { \
538
+ if (y.isDouble()) { \
539
+ double a = x.toDouble(); \
540
+ double b = y.toDouble(); \
541
+ push(stack, float_op); \
542
+ } else { \
543
+ double a = x.toDouble(); \
544
+ int64_t b = y.toInt(); \
545
+ push(stack, float_op); \
546
+ } \
547
+ } else { \
548
+ if (y.isDouble()) { \
549
+ int64_t a = x.toInt(); \
550
+ double b = y.toDouble(); \
551
+ push(stack, float_op); \
552
+ } else { \
553
+ int64_t a = x.toInt(); \
554
+ int64_t b = y.toInt(); \
555
+ push(stack, int_op); \
556
+ } \
557
+ } \
558
+ }, \
559
+ aliasAnalysisFromSchema())
560
+
561
+ #define DEFINE_SCALAR_BINARY_OP(aten_op, int_op, float_op, result) \
562
+ DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION_GENERIC( \
563
+ aten_op, int_op, float_op, result, "")
564
+
565
+ #define DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION( \
566
+ aten_op, int_op, float_op, result) \
567
+ DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION_GENERIC( \
568
+ aten_op, int_op, float_op, result, ".Scalar_Scalar")
569
+
570
+ #define DEFINE_BINARY_OP(aten_op, op) \
571
+ DEFINE_GENERIC_OP(aten_op, op, op, int, float), \
572
+ DEFINE_INT_FLOAT_OP(aten_op, op, float), \
573
+ DEFINE_SCALAR_BINARY_OP(aten_op, op, op, Scalar)
574
+
575
+ #define DEFINE_BINARY_FLOAT_OP(aten_op, op) \
576
+ DEFINE_GENERIC_OP(aten_op, op, op, float, float), \
577
+ DEFINE_INT_FLOAT_OP(aten_op, op, float), \
578
+ DEFINE_SCALAR_BINARY_OP(aten_op, op, op, float)
579
+
580
+ #define DEFINE_COMPARISON_OP(aten_op, op) \
581
+ DEFINE_GENERIC_OP(aten_op, op, op, bool, bool), \
582
+ DEFINE_INT_FLOAT_OP(aten_op, op, bool), \
583
+ DEFINE_SCALAR_BINARY_OP(aten_op, op, op, bool), \
584
+ DEFINE_STR_CMP_OP(aten_op, op)
585
+
586
+ #define DEFINE_UNARY_INT_OP(aten_op, op, result) \
587
+ OperatorGeneratorArgs( \
588
+ TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a) -> " #result), \
589
+ [](Stack& stack) { \
590
+ int64_t a; \
591
+ pop(stack, a); \
592
+ push(stack, op); \
593
+ }, \
594
+ aliasAnalysisFromSchema())
595
+
596
+ #define DEFINE_UNARY_FLOAT_OP(aten_op, op, result) \
597
+ OperatorGeneratorArgs( \
598
+ TORCH_SELECTIVE_SCHEMA(#aten_op ".float(float a) -> " #result), \
599
+ [](Stack& stack) { \
600
+ double a; \
601
+ pop(stack, a); \
602
+ push(stack, op); \
603
+ }, \
604
+ aliasAnalysisFromSchema())
605
+
606
+ #define DEFINE_UNARY_OP(aten_op, op, int_result, float_result) \
607
+ DEFINE_UNARY_INT_OP(aten_op, op, int_result), \
608
+ DEFINE_UNARY_FLOAT_OP(aten_op, op, float_result), \
609
+ OperatorGeneratorArgs( \
610
+ TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \
611
+ [](Stack& stack) { \
612
+ IValue x; \
613
+ pop(stack, x); \
614
+ if (x.isDouble()) { \
615
+ double a = x.toDouble(); \
616
+ push(stack, static_cast<float_result>(op)); \
617
+ } else { \
618
+ int64_t a = x.toInt(); \
619
+ push(stack, static_cast<int_result>(op)); \
620
+ } \
621
+ }, \
622
+ aliasAnalysisFromSchema())
623
+ #define DEFINE_BOOL_OP(aten_op, op) \
624
+ OperatorGeneratorArgs( \
625
+ TORCH_SELECTIVE_SCHEMA(#aten_op ".bool(bool a, bool b) -> bool"), \
626
+ [](Stack& stack) { \
627
+ bool a, b; \
628
+ pop(stack, a, b); \
629
+ push(stack, op); \
630
+ }, \
631
+ aliasAnalysisFromSchema())
632
+ #define DEFINE_STRING_OP(op_name, string_op, result) \
633
+ OperatorGeneratorArgs( \
634
+ TORCH_SELECTIVE_SCHEMA(#op_name ".str(str a, str b) ->" #result), \
635
+ [](Stack& stack) { \
636
+ auto b = pop(stack).toStringRef(); \
637
+ auto a = pop(stack).toStringRef(); \
638
+ push(stack, string_op); \
639
+ }, \
640
+ aliasAnalysisFromSchema())
641
+
642
+ //-----------------------------------------------------------------------------
643
+ //-----------------------------------------------------------------------------
644
+ //-----------------------------------------------------------------------------
645
+ //-----------------------------------------------------------------------------
646
+ #define DEFINE_UNARY_COMPLEX_OP(aten_op, op, result) \
647
+ OperatorGeneratorArgs( \
648
+ TORCH_SELECTIVE_SCHEMA(#aten_op ".complex(complex a) -> " #result), \
649
+ [](Stack& stack) { \
650
+ c10::complex<double> a; \
651
+ pop(stack, a); \
652
+ push(stack, op); \
653
+ }, \
654
+ aliasAnalysisFromSchema())
655
+
656
+ // Some complex unary ops (like abs, angle) return real valued output, but most
657
+ // other unary ops return complex valued output. So, this macro is used in the
658
+ // former case where we can explicitly pass complex_result_cast argument, which
659
+ // is set to c10::complex<float> in the macro `DEFINE_UNARY_OP_WITH_COMPLEX`
660
+ // defined below.
661
+ #define DEFINE_UNARY_OP_WITH_COMPLEX_CAST( \
662
+ aten_op, \
663
+ op, \
664
+ int_result, \
665
+ float_result, \
666
+ complex_result, \
667
+ complex_result_cast) \
668
+ DEFINE_UNARY_INT_OP(aten_op, op, int_result), \
669
+ DEFINE_UNARY_FLOAT_OP(aten_op, op, float_result), \
670
+ DEFINE_UNARY_COMPLEX_OP(aten_op, op, complex_result), \
671
+ OperatorGeneratorArgs( \
672
+ TORCH_SELECTIVE_SCHEMA(#aten_op ".Scalar(Scalar a) -> Scalar"), \
673
+ [](Stack& stack) { \
674
+ IValue x; \
675
+ pop(stack, x); \
676
+ if (x.isDouble()) { \
677
+ double a = x.toDouble(); \
678
+ push(stack, static_cast<float_result>(op)); \
679
+ } else if (x.isComplexDouble()) { \
680
+ c10::complex<double> a = x.toComplexDouble(); \
681
+ push(stack, static_cast<complex_result_cast>(op)); \
682
+ } else { \
683
+ int64_t a = x.toInt(); \
684
+ push(stack, static_cast<int_result>(op)); \
685
+ } \
686
+ }, \
687
+ aliasAnalysisFromSchema())
688
+
689
+ #define DEFINE_UNARY_OP_WITH_COMPLEX(aten_op, op, int_result, float_result) \
690
+ DEFINE_UNARY_OP_WITH_COMPLEX_CAST( \
691
+ aten_op, op, int_result, float_result, complex, c10::complex<double>)
692
+
693
+ #define DEFINE_GENERIC_OP_WITH_COMPLEX( \
694
+ aten_op, \
695
+ int_op, \
696
+ float_op, \
697
+ complex_op, \
698
+ int_result, \
699
+ float_result, \
700
+ complex_result) \
701
+ OperatorGeneratorArgs( \
702
+ TORCH_SELECTIVE_SCHEMA(#aten_op ".int(int a, int b) -> " #int_result), \
703
+ [](Stack& stack) { \
704
+ int64_t a, b; \
705
+ pop(stack, a, b); \
706
+ push(stack, int_op); \
707
+ }, \
708
+ aliasAnalysisFromSchema()), \
709
+ OperatorGeneratorArgs( \
710
+ TORCH_SELECTIVE_SCHEMA( \
711
+ #aten_op ".complex(complex a, complex b) -> " #complex_result), \
712
+ [](Stack& stack) { \
713
+ c10::complex<double> a, b; \
714
+ pop(stack, a, b); \
715
+ push(stack, complex_op); \
716
+ }, \
717
+ aliasAnalysisFromSchema()), \
718
+ OperatorGeneratorArgs( \
719
+ TORCH_SELECTIVE_SCHEMA( \
720
+ #aten_op ".float(float a, float b) -> " #float_result), \
721
+ [](Stack& stack) { \
722
+ double a, b; \
723
+ pop(stack, a, b); \
724
+ push(stack, float_op); \
725
+ }, \
726
+ aliasAnalysisFromSchema())
727
+
728
+ #define DEFINE_INT_COMPLEX_OP(aten_op, op, result) \
729
+ OperatorGeneratorArgs( \
730
+ TORCH_SELECTIVE_SCHEMA(#aten_op \
731
+ ".int_complex(int a, complex b) -> " #result), \
732
+ [](Stack& stack) { \
733
+ int64_t a; \
734
+ c10::complex<double> b; \
735
+ pop(stack, a, b); \
736
+ push(stack, op); \
737
+ }, \
738
+ aliasAnalysisFromSchema()), \
739
+ OperatorGeneratorArgs( \
740
+ TORCH_SELECTIVE_SCHEMA( \
741
+ #aten_op ".complex_int(complex a, int b) -> " #result), \
742
+ [](Stack& stack) { \
743
+ c10::complex<double> a; \
744
+ int64_t b; \
745
+ pop(stack, a, b); \
746
+ push(stack, op); \
747
+ }, \
748
+ aliasAnalysisFromSchema())
749
+
750
+ #define DEFINE_FLOAT_COMPLEX_OP(aten_op, op, result) \
751
+ OperatorGeneratorArgs( \
752
+ TORCH_SELECTIVE_SCHEMA( \
753
+ #aten_op ".float_complex(float a, complex b) -> " #result), \
754
+ [](Stack& stack) { \
755
+ double a; \
756
+ c10::complex<double> b; \
757
+ pop(stack, a, b); \
758
+ push(stack, op); \
759
+ }, \
760
+ aliasAnalysisFromSchema()), \
761
+ OperatorGeneratorArgs( \
762
+ TORCH_SELECTIVE_SCHEMA( \
763
+ #aten_op ".complex_float(complex a, float b) -> " #result), \
764
+ [](Stack& stack) { \
765
+ c10::complex<double> a; \
766
+ double b; \
767
+ pop(stack, a, b); \
768
+ push(stack, op); \
769
+ }, \
770
+ aliasAnalysisFromSchema())
771
+
772
+ #define DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_AVOID_COLLISION_GENERIC( \
773
+ aten_op, int_op, float_op, complex_op, result, string_val) \
774
+ OperatorGeneratorArgs( \
775
+ TORCH_SELECTIVE_SCHEMA(#aten_op string_val \
776
+ "(Scalar a, Scalar b) -> " #result), \
777
+ [](Stack& stack) { \
778
+ IValue x, y; \
779
+ pop(stack, x, y); \
780
+ if (x.isComplexDouble()) { \
781
+ c10::complex<double> a = x.toComplexDouble(); \
782
+ if (y.isComplexDouble()) { \
783
+ c10::complex<double> b = y.toComplexDouble(); \
784
+ push(stack, complex_op); \
785
+ } else if (y.isDouble()) { \
786
+ double b = y.toDouble(); \
787
+ push(stack, complex_op); \
788
+ } else { \
789
+ int64_t b = y.toInt(); \
790
+ push(stack, complex_op); \
791
+ } \
792
+ } else if (x.isDouble()) { \
793
+ double a = x.toDouble(); \
794
+ if (y.isComplexDouble()) { \
795
+ c10::complex<double> b = y.toComplexDouble(); \
796
+ push(stack, complex_op); \
797
+ } else if (y.isDouble()) { \
798
+ double b = y.toDouble(); \
799
+ push(stack, float_op); \
800
+ } else { \
801
+ int64_t b = y.toInt(); \
802
+ push(stack, float_op); \
803
+ } \
804
+ } else { \
805
+ int64_t a = x.toInt(); \
806
+ if (y.isComplexDouble()) { \
807
+ c10::complex<double> b = y.toComplexDouble(); \
808
+ push(stack, complex_op); \
809
+ } else if (y.isDouble()) { \
810
+ double b = y.toDouble(); \
811
+ push(stack, float_op); \
812
+ } else { \
813
+ int64_t b = y.toInt(); \
814
+ push(stack, int_op); \
815
+ } \
816
+ } \
817
+ }, \
818
+ aliasAnalysisFromSchema())
819
+
820
+ #define DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_WITHOUT_INT_COMPLEX_PAIR( \
821
+ aten_op, int_op, float_op, complex_op, result) \
822
+ OperatorGeneratorArgs( \
823
+ TORCH_SELECTIVE_SCHEMA(#aten_op "(Scalar a, Scalar b) -> " #result), \
824
+ [](Stack& stack) { \
825
+ IValue x, y; \
826
+ pop(stack, x, y); \
827
+ if (x.isComplexDouble()) { \
828
+ c10::complex<double> a = x.toComplexDouble(); \
829
+ if (y.isComplexDouble()) { \
830
+ c10::complex<double> b = y.toComplexDouble(); \
831
+ push(stack, complex_op); \
832
+ } else if (y.isDouble()) { \
833
+ double b = y.toDouble(); \
834
+ push(stack, complex_op); \
835
+ } \
836
+ } else if (x.isDouble()) { \
837
+ double a = x.toDouble(); \
838
+ if (y.isComplexDouble()) { \
839
+ c10::complex<double> b = y.toComplexDouble(); \
840
+ push(stack, complex_op); \
841
+ } else if (y.isDouble()) { \
842
+ double b = y.toDouble(); \
843
+ push(stack, float_op); \
844
+ } else { \
845
+ int64_t b = y.toInt(); \
846
+ push(stack, float_op); \
847
+ } \
848
+ } else { \
849
+ int64_t a = x.toInt(); \
850
+ if (y.isDouble()) { \
851
+ double b = y.toDouble(); \
852
+ push(stack, float_op); \
853
+ } else if (y.isInt()) { \
854
+ int64_t b = y.toInt(); \
855
+ push(stack, int_op); \
856
+ } \
857
+ } \
858
+ }, \
859
+ aliasAnalysisFromSchema())
860
+
861
+ #define DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX( \
862
+ aten_op, int_op, float_op, complex_op, result) \
863
+ DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_AVOID_COLLISION_GENERIC( \
864
+ aten_op, int_op, float_op, complex_op, result, "")
865
+
866
+ #define DEFINE_BINARY_OP_WITH_COMPLEX(aten_op, op) \
867
+ DEFINE_GENERIC_OP_WITH_COMPLEX(aten_op, op, op, op, int, float, complex), \
868
+ DEFINE_INT_COMPLEX_OP(aten_op, op, complex), \
869
+ DEFINE_FLOAT_COMPLEX_OP(aten_op, op, complex), \
870
+ DEFINE_INT_FLOAT_OP(aten_op, op, float), \
871
+ DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX(aten_op, op, op, op, Scalar)
872
+
873
+ #define DEFINE_COMPARISON_OP_WITH_COMPLEX(aten_op, op) \
874
+ DEFINE_GENERIC_OP_WITH_COMPLEX(aten_op, op, op, op, bool, bool, bool), \
875
+ DEFINE_INT_FLOAT_OP(aten_op, op, bool), \
876
+ DEFINE_FLOAT_COMPLEX_OP(aten_op, op, bool), \
877
+ DEFINE_SCALAR_BINARY_OP_WITH_COMPLEX_WITHOUT_INT_COMPLEX_PAIR( \
878
+ aten_op, op, op, op, bool), \
879
+ DEFINE_STR_CMP_OP(aten_op, op)
880
+
881
+ TORCH_API at::Generator make_generator_for_device(
882
+ c10::Device device,
883
+ c10::optional<int64_t> seed = c10::nullopt);
884
+
885
+ } // namespace torch::jit
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/variable_tensor_list.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+
4
+ namespace torch::jit {
5
+
6
+ // a wrapper to mark places where we expect all the at::Tensors to be
7
+ // variables
8
// NOTE(review): public inheritance from std::vector means deleting a
// variable_tensor_list through a std::vector<at::Tensor>* is undefined
// behavior (no virtual destructor) -- confirm callers never do that.
+ struct variable_tensor_list : public std::vector<at::Tensor> {
9
// Default-constructs an empty list.
+ variable_tensor_list() = default;
10
// Builds the list from an iterator range, forwarding to the
// std::vector range constructor.
+ template <class InputIt>
11
+ variable_tensor_list(InputIt first, InputIt last)
12
+ : std::vector<at::Tensor>(first, last) {}
13
// Takes ownership of an existing vector without copying its tensors.
+ explicit variable_tensor_list(std::vector<at::Tensor>&& tensor)
14
+ : std::vector<at::Tensor>(std::move(tensor)) {}
15
+ };
16
+
17
+ } // namespace torch::jit
vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/LICENSE ADDED
@@ -0,0 +1,466 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ==============================================================================
2
+ Thrust is under the Apache Licence v2.0, with some specific exceptions listed below
3
+ libcu++ is under the Apache License v2.0 with LLVM Exceptions:
4
+ ==============================================================================
5
+ Apache License
6
+ Version 2.0, January 2004
7
+ http://www.apache.org/licenses/
8
+
9
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
10
+
11
+ 1. Definitions.
12
+
13
+ "License" shall mean the terms and conditions for use, reproduction,
14
+ and distribution as defined by Sections 1 through 9 of this document.
15
+
16
+ "Licensor" shall mean the copyright owner or entity authorized by
17
+ the copyright owner that is granting the License.
18
+
19
+ "Legal Entity" shall mean the union of the acting entity and all
20
+ other entities that control, are controlled by, or are under common
21
+ control with that entity. For the purposes of this definition,
22
+ "control" means (i) the power, direct or indirect, to cause the
23
+ direction or management of such entity, whether by contract or
24
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
25
+ outstanding shares, or (iii) beneficial ownership of such entity.
26
+
27
+ "You" (or "Your") shall mean an individual or Legal Entity
28
+ exercising permissions granted by this License.
29
+
30
+ "Source" form shall mean the preferred form for making modifications,
31
+ including but not limited to software source code, documentation
32
+ source, and configuration files.
33
+
34
+ "Object" form shall mean any form resulting from mechanical
35
+ transformation or translation of a Source form, including but
36
+ not limited to compiled object code, generated documentation,
37
+ and conversions to other media types.
38
+
39
+ "Work" shall mean the work of authorship, whether in Source or
40
+ Object form, made available under the License, as indicated by a
41
+ copyright notice that is included in or attached to the work
42
+ (an example is provided in the Appendix below).
43
+
44
+ "Derivative Works" shall mean any work, whether in Source or Object
45
+ form, that is based on (or derived from) the Work and for which the
46
+ editorial revisions, annotations, elaborations, or other modifications
47
+ represent, as a whole, an original work of authorship. For the purposes
48
+ of this License, Derivative Works shall not include works that remain
49
+ separable from, or merely link (or bind by name) to the interfaces of,
50
+ the Work and Derivative Works thereof.
51
+
52
+ "Contribution" shall mean any work of authorship, including
53
+ the original version of the Work and any modifications or additions
54
+ to that Work or Derivative Works thereof, that is intentionally
55
+ submitted to Licensor for inclusion in the Work by the copyright owner
56
+ or by an individual or Legal Entity authorized to submit on behalf of
57
+ the copyright owner. For the purposes of this definition, "submitted"
58
+ means any form of electronic, verbal, or written communication sent
59
+ to the Licensor or its representatives, including but not limited to
60
+ communication on electronic mailing lists, source code control systems,
61
+ and issue tracking systems that are managed by, or on behalf of, the
62
+ Licensor for the purpose of discussing and improving the Work, but
63
+ excluding communication that is conspicuously marked or otherwise
64
+ designated in writing by the copyright owner as "Not a Contribution."
65
+
66
+ "Contributor" shall mean Licensor and any individual or Legal Entity
67
+ on behalf of whom a Contribution has been received by Licensor and
68
+ subsequently incorporated within the Work.
69
+
70
+ 2. Grant of Copyright License. Subject to the terms and conditions of
71
+ this License, each Contributor hereby grants to You a perpetual,
72
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
73
+ copyright license to reproduce, prepare Derivative Works of,
74
+ publicly display, publicly perform, sublicense, and distribute the
75
+ Work and such Derivative Works in Source or Object form.
76
+
77
+ 3. Grant of Patent License. Subject to the terms and conditions of
78
+ this License, each Contributor hereby grants to You a perpetual,
79
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
80
+ (except as stated in this section) patent license to make, have made,
81
+ use, offer to sell, sell, import, and otherwise transfer the Work,
82
+ where such license applies only to those patent claims licensable
83
+ by such Contributor that are necessarily infringed by their
84
+ Contribution(s) alone or by combination of their Contribution(s)
85
+ with the Work to which such Contribution(s) was submitted. If You
86
+ institute patent litigation against any entity (including a
87
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
88
+ or a Contribution incorporated within the Work constitutes direct
89
+ or contributory patent infringement, then any patent licenses
90
+ granted to You under this License for that Work shall terminate
91
+ as of the date such litigation is filed.
92
+
93
+ 4. Redistribution. You may reproduce and distribute copies of the
94
+ Work or Derivative Works thereof in any medium, with or without
95
+ modifications, and in Source or Object form, provided that You
96
+ meet the following conditions:
97
+
98
+ (a) You must give any other recipients of the Work or
99
+ Derivative Works a copy of this License; and
100
+
101
+ (b) You must cause any modified files to carry prominent notices
102
+ stating that You changed the files; and
103
+
104
+ (c) You must retain, in the Source form of any Derivative Works
105
+ that You distribute, all copyright, patent, trademark, and
106
+ attribution notices from the Source form of the Work,
107
+ excluding those notices that do not pertain to any part of
108
+ the Derivative Works; and
109
+
110
+ (d) If the Work includes a "NOTICE" text file as part of its
111
+ distribution, then any Derivative Works that You distribute must
112
+ include a readable copy of the attribution notices contained
113
+ within such NOTICE file, excluding those notices that do not
114
+ pertain to any part of the Derivative Works, in at least one
115
+ of the following places: within a NOTICE text file distributed
116
+ as part of the Derivative Works; within the Source form or
117
+ documentation, if provided along with the Derivative Works; or,
118
+ within a display generated by the Derivative Works, if and
119
+ wherever such third-party notices normally appear. The contents
120
+ of the NOTICE file are for informational purposes only and
121
+ do not modify the License. You may add Your own attribution
122
+ notices within Derivative Works that You distribute, alongside
123
+ or as an addendum to the NOTICE text from the Work, provided
124
+ that such additional attribution notices cannot be construed
125
+ as modifying the License.
126
+
127
+ You may add Your own copyright statement to Your modifications and
128
+ may provide additional or different license terms and conditions
129
+ for use, reproduction, or distribution of Your modifications, or
130
+ for any such Derivative Works as a whole, provided Your use,
131
+ reproduction, and distribution of the Work otherwise complies with
132
+ the conditions stated in this License.
133
+
134
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
135
+ any Contribution intentionally submitted for inclusion in the Work
136
+ by You to the Licensor shall be under the terms and conditions of
137
+ this License, without any additional terms or conditions.
138
+ Notwithstanding the above, nothing herein shall supersede or modify
139
+ the terms of any separate license agreement you may have executed
140
+ with Licensor regarding such Contributions.
141
+
142
+ 6. Trademarks. This License does not grant permission to use the trade
143
+ names, trademarks, service marks, or product names of the Licensor,
144
+ except as required for reasonable and customary use in describing the
145
+ origin of the Work and reproducing the content of the NOTICE file.
146
+
147
+ 7. Disclaimer of Warranty. Unless required by applicable law or
148
+ agreed to in writing, Licensor provides the Work (and each
149
+ Contributor provides its Contributions) on an "AS IS" BASIS,
150
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
151
+ implied, including, without limitation, any warranties or conditions
152
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
153
+ PARTICULAR PURPOSE. You are solely responsible for determining the
154
+ appropriateness of using or redistributing the Work and assume any
155
+ risks associated with Your exercise of permissions under this License.
156
+
157
+ 8. Limitation of Liability. In no event and under no legal theory,
158
+ whether in tort (including negligence), contract, or otherwise,
159
+ unless required by applicable law (such as deliberate and grossly
160
+ negligent acts) or agreed to in writing, shall any Contributor be
161
+ liable to You for damages, including any direct, indirect, special,
162
+ incidental, or consequential damages of any character arising as a
163
+ result of this License or out of the use or inability to use the
164
+ Work (including but not limited to damages for loss of goodwill,
165
+ work stoppage, computer failure or malfunction, or any and all
166
+ other commercial damages or losses), even if such Contributor
167
+ has been advised of the possibility of such damages.
168
+
169
+ 9. Accepting Warranty or Additional Liability. While redistributing
170
+ the Work or Derivative Works thereof, You may choose to offer,
171
+ and charge a fee for, acceptance of support, warranty, indemnity,
172
+ or other liability obligations and/or rights consistent with this
173
+ License. However, in accepting such obligations, You may act only
174
+ on Your own behalf and on Your sole responsibility, not on behalf
175
+ of any other Contributor, and only if You agree to indemnify,
176
+ defend, and hold each Contributor harmless for any liability
177
+ incurred by, or claims asserted against, such Contributor by reason
178
+ of your accepting any such warranty or additional liability.
179
+
180
+ END OF TERMS AND CONDITIONS
181
+
182
+ APPENDIX: How to apply the Apache License to your work.
183
+
184
+ To apply the Apache License to your work, attach the following
185
+ boilerplate notice, with the fields enclosed by brackets "[]"
186
+ replaced with your own identifying information. (Don't include
187
+ the brackets!) The text should be enclosed in the appropriate
188
+ comment syntax for the file format. We also recommend that a
189
+ file or class name and description of purpose be included on the
190
+ same "printed page" as the copyright notice for easier
191
+ identification within third-party archives.
192
+
193
+ Copyright [yyyy] [name of copyright owner]
194
+
195
+ Licensed under the Apache License, Version 2.0 (the "License");
196
+ you may not use this file except in compliance with the License.
197
+ You may obtain a copy of the License at
198
+
199
+ http://www.apache.org/licenses/LICENSE-2.0
200
+
201
+ Unless required by applicable law or agreed to in writing, software
202
+ distributed under the License is distributed on an "AS IS" BASIS,
203
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
204
+ See the License for the specific language governing permissions and
205
+ limitations under the License.
206
+
207
+
208
+ ==============================================================================
209
+ Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
210
+ ==============================================================================
211
+ ---- LLVM Exceptions to the Apache 2.0 License ----
212
+
213
+ As an exception, if, as a result of your compiling your source code, portions
214
+ of this Software are embedded into an Object form of such source code, you
215
+ may redistribute such embedded portions in such Object form without complying
216
+ with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
217
+
218
+ In addition, if you combine or link compiled forms of this Software with
219
+ software that is licensed under the GPLv2 ("Combined Software") and if a
220
+ court of competent jurisdiction determines that the patent provision (Section
221
+ 3), the indemnity provision (Section 9) or other Section of the License
222
+ conflicts with the conditions of the GPLv2, you may retroactively and
223
+ prospectively choose to deem waived or otherwise exclude such Section(s) of
224
+ the License, but only in their entirety and only with respect to the Combined
225
+ Software.
226
+
227
+ ==============================================================================
228
+ Software from third parties included in the LLVM Project:
229
+ ==============================================================================
230
+ The LLVM Project contains third party software which is under different license
231
+ terms. All such code will be identified clearly using at least one of two
232
+ mechanisms:
233
+ 1) It will be in a separate directory tree with its own `LICENSE.txt` or
234
+ `LICENSE` file at the top containing the specific license and restrictions
235
+ which apply to that software, or
236
+ 2) It will contain specific license and restriction terms at the top of every
237
+ file.
238
+
239
+ ==============================================================================
240
+ Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
241
+ ==============================================================================
242
+
243
+ The libc++ library is dual licensed under both the University of Illinois
244
+ "BSD-Like" license and the MIT license. As a user of this code you may choose
245
+ to use it under either license. As a contributor, you agree to allow your code
246
+ to be used under both.
247
+
248
+ Full text of the relevant licenses is included below.
249
+
250
+ ==============================================================================
251
+
252
+ University of Illinois/NCSA
253
+ Open Source License
254
+
255
+ Copyright (c) 2009-2019 by the contributors listed in CREDITS.TXT
256
+
257
+ All rights reserved.
258
+
259
+ Developed by:
260
+
261
+ LLVM Team
262
+
263
+ University of Illinois at Urbana-Champaign
264
+
265
+ http://llvm.org
266
+
267
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
268
+ this software and associated documentation files (the "Software"), to deal with
269
+ the Software without restriction, including without limitation the rights to
270
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
271
+ of the Software, and to permit persons to whom the Software is furnished to do
272
+ so, subject to the following conditions:
273
+
274
+ * Redistributions of source code must retain the above copyright notice,
275
+ this list of conditions and the following disclaimers.
276
+
277
+ * Redistributions in binary form must reproduce the above copyright notice,
278
+ this list of conditions and the following disclaimers in the
279
+ documentation and/or other materials provided with the distribution.
280
+
281
+ * Neither the names of the LLVM Team, University of Illinois at
282
+ Urbana-Champaign, nor the names of its contributors may be used to
283
+ endorse or promote products derived from this Software without specific
284
+ prior written permission.
285
+
286
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
287
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
288
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
289
+ CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
290
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
291
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
292
+ SOFTWARE.
293
+
294
+ ==============================================================================
295
+
296
+ Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT
297
+
298
+ Permission is hereby granted, free of charge, to any person obtaining a copy
299
+ of this software and associated documentation files (the "Software"), to deal
300
+ in the Software without restriction, including without limitation the rights
301
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
302
+ copies of the Software, and to permit persons to whom the Software is
303
+ furnished to do so, subject to the following conditions:
304
+
305
+ The above copyright notice and this permission notice shall be included in
306
+ all copies or substantial portions of the Software.
307
+
308
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
309
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
310
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
311
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
312
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
313
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
314
+ THE SOFTWARE.
315
+
316
+ ==============================================================================
317
+
318
+ Some libcudacxx components are covered by the below license. Each source file
319
+ indicates which license it is under.
320
+
321
+ ==============================================================================
322
+
323
+ NVIDIA SOFTWARE LICENSE
324
+
325
+ This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”).
326
+
327
+ This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users.
328
+
329
+ You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions.
330
+
331
+ 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license.
332
+
333
+ 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant:
334
+ a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights.
335
+ b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE.
336
+
337
+ 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows:
338
+ a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs.
339
+ b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE.
340
+ c. You may not modify or create derivative works of any portion of the SOFTWARE.
341
+ d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE.
342
+ e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge.
343
+ f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses.
344
+ g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms.
345
+
346
+ 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems.
347
+
348
+ 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE.
349
+
350
+ 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict.
351
+
352
+ 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice.
353
+
354
+ 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED.
355
+
356
+ 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT.
357
+
358
+ 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you.
359
+
360
+ 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction.
361
+
362
+ 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect.
363
+
364
+ 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE.
365
+
366
+ 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051.
367
+
368
+ 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party.
369
+
370
+ (v. August 20, 2021)
371
+
372
+ ================================================================================
373
+ Some portions of Thrust may be licensed under other compatible open-source
374
+ licenses. Any divergence from the Apache 2 license will be noted in the source
375
+ code where applicable.
376
+ Portions under other terms include, but are not limited to:
377
+ ================================================================================
378
+
379
+ Various C++ utility classes in Thrust are based on the Boost Iterator, Tuple,
380
+ System, and Random Number libraries, which are provided under the Boost Software
381
+ License:
382
+
383
+ Boost Software License - Version 1.0 - August 17th, 2003
384
+
385
+ Permission is hereby granted, free of charge, to any person or organization
386
+ obtaining a copy of the software and accompanying documentation covered by
387
+ this license (the "Software") to use, reproduce, display, distribute,
388
+ execute, and transmit the Software, and to prepare derivative works of the
389
+ Software, and to permit third-parties to whom the Software is furnished to
390
+ do so, all subject to the following:
391
+
392
+ The copyright notices in the Software and this entire statement, including
393
+ the above license grant, this restriction and the following disclaimer,
394
+ must be included in all copies of the Software, in whole or in part, and
395
+ all derivative works of the Software, unless such copies or derivative
396
+ works are solely in the form of machine-executable object code generated by
397
+ a source language processor.
398
+
399
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
400
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
401
+ FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
402
+ SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
403
+ FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
404
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
405
+ DEALINGS IN THE SOFTWARE.
406
+
407
+ ================================================================================
408
+
409
+ Portions of the thrust::complex implementation are derived from FreeBSD with the
410
+ following terms:
411
+
412
+ ================================================================================
413
+
414
+
415
+ Redistribution and use in source and binary forms, with or without
416
+ modification, are permitted provided that the following conditions
417
+ are met:
418
+
419
+ 1. Redistributions of source code must retain the above copyright
420
+ notice[1] unmodified, this list of conditions, and the following
421
+ disclaimer.
422
+ 2. Redistributions in binary form must reproduce the above copyright
423
+ notice, this list of conditions and the following disclaimer in the
424
+ documentation and/or other materials provided with the distribution.
425
+
426
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
427
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
428
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
429
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
430
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
431
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
432
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
433
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
434
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
435
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
436
+
437
+ [1] Individual copyright notices from the original authors are included in
438
+ the relevant source files.
439
+
440
+ ==============================================================================
441
+ CUB's source code is released under the BSD 3-Clause license:
442
+ ==============================================================================
443
+ Copyright (c) 2010-2011, Duane Merrill. All rights reserved.
444
+ Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
445
+
446
+ Redistribution and use in source and binary forms, with or without
447
+ modification, are permitted provided that the following conditions are met:
448
+ * Redistributions of source code must retain the above copyright
449
+ notice, this list of conditions and the following disclaimer.
450
+ * Redistributions in binary form must reproduce the above copyright
451
+ notice, this list of conditions and the following disclaimer in the
452
+ documentation and/or other materials provided with the distribution.
453
+ * Neither the name of the NVIDIA CORPORATION nor the
454
+ names of its contributors may be used to endorse or promote products
455
+ derived from this software without specific prior written permission.
456
+
457
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
458
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
459
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
460
+ DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
461
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
462
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
463
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
464
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
465
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
466
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/config.cuh ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /******************************************************************************
2
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3
+ *
4
+ * Redistribution and use in source and binary forms, with or without
5
+ * modification, are permitted provided that the following conditions are met:
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the NVIDIA CORPORATION nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18
+ * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25
+ *
26
+ ******************************************************************************/
27
+
28
+ /**
29
+ * \file
30
+ * Static configuration header for the CUB project.
31
+ */
32
+
33
+ #pragma once
34
+
35
+ // For _CCCL_IMPLICIT_SYSTEM_HEADER
36
+ #include <cuda/__cccl_config> // IWYU pragma: export
37
+
38
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
39
+ # pragma GCC system_header
40
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
41
+ # pragma clang system_header
42
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
43
+ # pragma system_header
44
+ #endif // no system header
45
+
46
+ #include <cub/util_arch.cuh> // IWYU pragma: export
47
+ #include <cub/util_compiler.cuh> // IWYU pragma: export
48
+ #include <cub/util_cpp_dialect.cuh> // IWYU pragma: export
49
+ #include <cub/util_macro.cuh> // IWYU pragma: export
50
+ #include <cub/util_namespace.cuh> // IWYU pragma: export