Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h +13 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h +369 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/native/Histogram.h +16 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h +157 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h +11 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorCompare.h +49 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorIterator.h +2 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorTransformations.h +30 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TypeProperties.h +20 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/native/layer_norm.h +100 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h +277 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h +111 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h +47 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h +15 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h +186 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h +122 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h +0 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h +58 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h +96 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h +79 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h +34 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h +53 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/warnings.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h +167 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h +37 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpython_defs.h +21 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/eval_frame.h +7 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/guards.h +4 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/init.h +14 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/python_compiled_autograd.h +7 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/device_utils.h +51 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model.h +525 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/tensor_converter.h +35 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h +61 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h +100 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h +158 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h +144 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/config.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h +32 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_metadata.h +49 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/lazy_graph_executor.h +426 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/arithmetic_ir_ops.h +14 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h +259 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_util.h +78 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/thread_pool.h +37 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/util.h +126 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/python/python_util.h +15 -0
.gitattributes
CHANGED
|
@@ -1685,3 +1685,5 @@ parrot/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64
|
|
| 1685 |
vllm/lib/python3.10/site-packages/sympy/printing/__pycache__/latex.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1686 |
parrot/lib/python3.10/site-packages/scipy/special/_gufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1687 |
parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 1685 |
vllm/lib/python3.10/site-packages/sympy/printing/__pycache__/latex.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1686 |
parrot/lib/python3.10/site-packages/scipy/special/_gufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1687 |
parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 1688 |
+
vllm/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1689 |
+
vllm/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/macros/Export.h>
|
| 3 |
+
#include <limits>
|
| 4 |
+
|
| 5 |
+
namespace at {
|
| 6 |
+
class TensorBase;
|
| 7 |
+
}
|
| 8 |
+
|
| 9 |
+
namespace at::native {
|
| 10 |
+
|
| 11 |
+
TORCH_API bool canUse32BitIndexMath(const at::TensorBase &t, int64_t max_elem=std::numeric_limits<int32_t>::max());
|
| 12 |
+
|
| 13 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h
ADDED
|
@@ -0,0 +1,369 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Device.h>
|
| 4 |
+
#include <ATen/Dispatch.h>
|
| 5 |
+
#include <ATen/ScalarType.h>
|
| 6 |
+
#include <ATen/core/Tensor.h>
|
| 7 |
+
#include <ATen/native/utils/ParamsHash.h>
|
| 8 |
+
#include <c10/util/Exception.h>
|
| 9 |
+
#include <c10/util/irange.h>
|
| 10 |
+
|
| 11 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 12 |
+
#include <ATen/NativeFunctions.h>
|
| 13 |
+
#else
|
| 14 |
+
#include <ATen/ops/result_type_native.h>
|
| 15 |
+
#endif
|
| 16 |
+
|
| 17 |
+
#include <unordered_map>
|
| 18 |
+
#include <vector>
|
| 19 |
+
|
| 20 |
+
namespace at::native {
|
| 21 |
+
namespace {
|
| 22 |
+
// Check if tensor list has either a boolean tensor or a integer tensor
|
| 23 |
+
inline bool has_integral_tensor(TensorList tensors, const bool includeBool) {
|
| 24 |
+
return std::any_of(
|
| 25 |
+
tensors.begin(), tensors.end(), [&includeBool](const auto& t) {
|
| 26 |
+
return at::isIntegralType(t.scalar_type(), includeBool);
|
| 27 |
+
});
|
| 28 |
+
}
|
| 29 |
+
// check if tensor list has bool tensors
|
| 30 |
+
inline bool has_bool_tensor(TensorList tensors) {
|
| 31 |
+
return std::any_of(tensors.begin(), tensors.end(), [](const auto& t) -> bool {
|
| 32 |
+
return t.scalar_type() == ScalarType::Bool;
|
| 33 |
+
});
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// Check foreach API restrictions
|
| 37 |
+
// - Tensor lists must be non-empty.
|
| 38 |
+
// - All TensorLists and ScalarLists must have the same number of elements.
|
| 39 |
+
// - Corresponding tensors must have the same size.
|
| 40 |
+
inline void check_foreach_api_restrictions(TensorList tensors) {
|
| 41 |
+
TORCH_CHECK(!tensors.empty(), "Tensor list must have at least one tensor.");
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
inline void check_foreach_api_restrictions(
|
| 45 |
+
TensorList tensors,
|
| 46 |
+
ArrayRef<Scalar> scalars) {
|
| 47 |
+
check_foreach_api_restrictions(tensors);
|
| 48 |
+
TORCH_CHECK(
|
| 49 |
+
tensors.size() == scalars.size(),
|
| 50 |
+
"Tensor list must have same number of elements as scalar list.");
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
inline void check_foreach_api_restrictions(
|
| 54 |
+
TensorList tensors1,
|
| 55 |
+
TensorList tensors2) {
|
| 56 |
+
TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor.");
|
| 57 |
+
TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor.");
|
| 58 |
+
TORCH_CHECK(
|
| 59 |
+
tensors1.size() == tensors2.size(),
|
| 60 |
+
"Tensor lists must have the same number of tensors, got ",
|
| 61 |
+
tensors1.size(),
|
| 62 |
+
" and ",
|
| 63 |
+
tensors2.size());
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
inline void check_foreach_api_restrictions(
|
| 67 |
+
TensorList tensors1,
|
| 68 |
+
TensorList tensors2,
|
| 69 |
+
TensorList tensors3) {
|
| 70 |
+
TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor.");
|
| 71 |
+
TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor.");
|
| 72 |
+
TORCH_CHECK(!tensors3.empty(), "Tensor list must have at least one tensor.");
|
| 73 |
+
TORCH_CHECK(
|
| 74 |
+
tensors1.size() == tensors2.size(),
|
| 75 |
+
"Tensor lists must have the same number of tensors, got ",
|
| 76 |
+
tensors1.size(),
|
| 77 |
+
" and ",
|
| 78 |
+
tensors2.size());
|
| 79 |
+
TORCH_CHECK(
|
| 80 |
+
tensors1.size() == tensors3.size(),
|
| 81 |
+
"Tensor lists must have the same number of tensors, got ",
|
| 82 |
+
tensors1.size(),
|
| 83 |
+
" and ",
|
| 84 |
+
tensors3.size());
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
inline void check_foreach_api_restrictions(
|
| 88 |
+
TensorList tensors1,
|
| 89 |
+
TensorList tensors2,
|
| 90 |
+
TensorList tensors3,
|
| 91 |
+
ArrayRef<Scalar> scalars) {
|
| 92 |
+
check_foreach_api_restrictions(tensors1, tensors2, tensors3);
|
| 93 |
+
TORCH_CHECK(
|
| 94 |
+
tensors1.size() == scalars.size(),
|
| 95 |
+
"Tensor list must have same number of elements as scalar list, got ",
|
| 96 |
+
tensors1.size(),
|
| 97 |
+
" and ",
|
| 98 |
+
scalars.size());
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
// Helper function called in check_fast_path_restrictions to check whether all
|
| 102 |
+
// corresponding tensors (aligning in index across the tensorLists) share the
|
| 103 |
+
// same device and dtype.
|
| 104 |
+
inline bool _check_tensors_share_device_and_dtype(
|
| 105 |
+
ArrayRef<TensorList> tensorLists) {
|
| 106 |
+
const auto expected_dtype = tensorLists[0][0].dtype();
|
| 107 |
+
const auto expected_device = tensorLists[0][0].device();
|
| 108 |
+
|
| 109 |
+
auto is_tensor_okay = [&](const Tensor& tensor) {
|
| 110 |
+
return tensor.dtype() == expected_dtype &&
|
| 111 |
+
tensor.device() == expected_device && tensor.layout() == at::kStrided &&
|
| 112 |
+
tensor.is_non_overlapping_and_dense();
|
| 113 |
+
};
|
| 114 |
+
|
| 115 |
+
for (const auto& tensorList : tensorLists) {
|
| 116 |
+
for (const auto& tensor : tensorList) {
|
| 117 |
+
if (!is_tensor_okay(tensor)) {
|
| 118 |
+
return false;
|
| 119 |
+
}
|
| 120 |
+
}
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
return true;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
// Helper function called in check_fast_path_restrictions to check if
|
| 127 |
+
// corresponding tensors in tensor lists have the same sizes and strides.
|
| 128 |
+
inline bool _check_tensors_share_sizes_and_strides(
|
| 129 |
+
ArrayRef<TensorList> tensorLists) {
|
| 130 |
+
for (const auto i : c10::irange(1, tensorLists.size())) {
|
| 131 |
+
for (const auto j : c10::irange(tensorLists[0].size())) {
|
| 132 |
+
if (tensorLists[0][j].sizes() != tensorLists[i][j].sizes() ||
|
| 133 |
+
tensorLists[0][j].strides() != tensorLists[i][j].strides()) {
|
| 134 |
+
return false;
|
| 135 |
+
}
|
| 136 |
+
}
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
return true;
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
// Helper function called in check_fast_path_restrictions to check whether
|
| 143 |
+
// all tensors type promote properly with the scalars in scalarList. This
|
| 144 |
+
// function assumes that _check_tensors_share_device_and_dtype has already been
|
| 145 |
+
// called so that all corresponding tensors in tensorLists have the same dtype.
|
| 146 |
+
// Then, it is sufficient to check the type promotion with just one tensorList.
|
| 147 |
+
inline bool _check_tensors_do_type_promotion_with_scalars(
|
| 148 |
+
TensorList tensorList,
|
| 149 |
+
ArrayRef<Scalar> scalarList = {},
|
| 150 |
+
bool does_op_promote_integer_inputs_to_float = false) {
|
| 151 |
+
for (const auto i : c10::irange(tensorList.size())) {
|
| 152 |
+
// For division, integer inputs will result in float.
|
| 153 |
+
if (does_op_promote_integer_inputs_to_float) {
|
| 154 |
+
if (at::isIntegralType(
|
| 155 |
+
tensorList[i].scalar_type(), /*includeBool*/ true)) {
|
| 156 |
+
return false;
|
| 157 |
+
}
|
| 158 |
+
}
|
| 159 |
+
if (!scalarList.empty()) {
|
| 160 |
+
const auto& scalar =
|
| 161 |
+
scalarList.size() == 1 ? scalarList[0] : scalarList[i];
|
| 162 |
+
const auto& tensor = tensorList[i];
|
| 163 |
+
// note(mkozuki): This check might be responsible for
|
| 164 |
+
// `_foreach_add(bool_tensors, bool_tensors)` being pushed to slow path.
|
| 165 |
+
if (tensor.scalar_type() != at::native::result_type(scalar, tensor)) {
|
| 166 |
+
return false;
|
| 167 |
+
}
|
| 168 |
+
}
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
return true;
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
// To go via 'fast' path, several conditions must be satisfied
|
| 175 |
+
// - All tensors in all lists must have the same dtype.
|
| 176 |
+
// - All tensors must be on the same device
|
| 177 |
+
// - All tensors must have strided layout
|
| 178 |
+
// - All tensors must be non-overlapping and dense
|
| 179 |
+
// - Resulting tensor must have the same dtype as the input one
|
| 180 |
+
|
| 181 |
+
// Please, make sure to call check_foreach_api_restrictions before calling this
|
| 182 |
+
// method. There is a set of preconditions that have to be satisfied.
|
| 183 |
+
inline bool check_fast_path_restrictions(
|
| 184 |
+
ArrayRef<TensorList> tensorLists,
|
| 185 |
+
ArrayRef<Scalar> scalarList = {},
|
| 186 |
+
bool does_op_promote_integer_inputs_to_float = false) {
|
| 187 |
+
return _check_tensors_share_device_and_dtype(tensorLists) &&
|
| 188 |
+
_check_tensors_share_sizes_and_strides(tensorLists) &&
|
| 189 |
+
_check_tensors_do_type_promotion_with_scalars(
|
| 190 |
+
tensorLists[0],
|
| 191 |
+
scalarList,
|
| 192 |
+
does_op_promote_integer_inputs_to_float);
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
inline std::vector<c10::Scalar> convert_tensor_to_scalar_list(
|
| 196 |
+
const Tensor& scalarList_,
|
| 197 |
+
int64_t expect_length) {
|
| 198 |
+
std::vector<c10::Scalar> scalarList;
|
| 199 |
+
TORCH_CHECK(
|
| 200 |
+
scalarList_.device() == c10::kCPU,
|
| 201 |
+
"Expected scalars to be on CPU, got ",
|
| 202 |
+
scalarList_.device(),
|
| 203 |
+
" instead.");
|
| 204 |
+
TORCH_CHECK(
|
| 205 |
+
scalarList_.is_contiguous(), "Expected scalars to be contiguous.");
|
| 206 |
+
TORCH_CHECK(
|
| 207 |
+
scalarList_.dim() == 1,
|
| 208 |
+
"Expected packed scalar Tensor to be of dimension 1. Got ",
|
| 209 |
+
scalarList_.dim(),
|
| 210 |
+
" instead.");
|
| 211 |
+
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
|
| 212 |
+
kComplexHalf,
|
| 213 |
+
kHalf,
|
| 214 |
+
kBool,
|
| 215 |
+
kBFloat16,
|
| 216 |
+
scalarList_.scalar_type(),
|
| 217 |
+
"convert_tensor_to_scalar_list",
|
| 218 |
+
[&]() {
|
| 219 |
+
const scalar_t* scalar_data = scalarList_.data_ptr<scalar_t>();
|
| 220 |
+
TORCH_CHECK(
|
| 221 |
+
(expect_length == scalarList_.size(0)),
|
| 222 |
+
"Expected length of scalars to match input of length ",
|
| 223 |
+
expect_length,
|
| 224 |
+
" but got ",
|
| 225 |
+
scalarList_.size(0),
|
| 226 |
+
" instead.");
|
| 227 |
+
for (int64_t i = 0; i < scalarList_.size(0); i++) {
|
| 228 |
+
scalarList.emplace_back(scalar_data[i]);
|
| 229 |
+
}
|
| 230 |
+
});
|
| 231 |
+
return scalarList;
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
inline bool can_use_fast_route(
|
| 235 |
+
ArrayRef<TensorList> tensorLists,
|
| 236 |
+
ArrayRef<Scalar> scalarList = {},
|
| 237 |
+
bool does_op_promote_integer_inputs_to_float = false) {
|
| 238 |
+
return check_fast_path_restrictions(
|
| 239 |
+
tensorLists, scalarList, does_op_promote_integer_inputs_to_float);
|
| 240 |
+
}
|
| 241 |
+
|
| 242 |
+
inline bool can_use_fast_route(
|
| 243 |
+
TensorList tensors1,
|
| 244 |
+
TensorList tensors2,
|
| 245 |
+
bool does_op_promote_integer_inputs_to_float = false) {
|
| 246 |
+
return can_use_fast_route(
|
| 247 |
+
{tensors1, tensors2}, {}, does_op_promote_integer_inputs_to_float);
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
using DeviceDtypeKey = std::pair<at::Device, at::ScalarType>;
|
| 251 |
+
using IndicesT = std::vector<int>;
|
| 252 |
+
using nested_optional_tensorvec_t =
|
| 253 |
+
std::vector<std::vector<c10::optional<at::Tensor>>>;
|
| 254 |
+
using TensorsAndIndicesT = std::pair<nested_optional_tensorvec_t, IndicesT>;
|
| 255 |
+
using FlatMap = std::unordered_map<
|
| 256 |
+
DeviceDtypeKey,
|
| 257 |
+
TensorsAndIndicesT,
|
| 258 |
+
ParamsHash<DeviceDtypeKey>>;
|
| 259 |
+
|
| 260 |
+
inline FlatMap _group_tensors_by_first_tensors_device_and_dtype(
|
| 261 |
+
const nested_optional_tensorvec_t& nested_tensorlist,
|
| 262 |
+
const bool with_indices) {
|
| 263 |
+
FlatMap grouped_tensors_with_indices;
|
| 264 |
+
|
| 265 |
+
TORCH_CHECK(!nested_tensorlist.empty());
|
| 266 |
+
TORCH_CHECK(!nested_tensorlist[0].empty());
|
| 267 |
+
const auto num_lists = nested_tensorlist.size();
|
| 268 |
+
const auto num_tensors = nested_tensorlist[0].size();
|
| 269 |
+
|
| 270 |
+
TORCH_CHECK(std::all_of(
|
| 271 |
+
nested_tensorlist.cbegin(),
|
| 272 |
+
nested_tensorlist.cend(),
|
| 273 |
+
[&](const auto& tensorlist) -> bool {
|
| 274 |
+
// note(crcrpar): Allow empty tensorlists following
|
| 275 |
+
// ref:
|
| 276 |
+
// https://github.com/pytorch/pytorch/blob/85885301fd3c6adb8b9dc3cf7afadf6945566684/torch/utils/_foreach_utils.py#L21-L24
|
| 277 |
+
return tensorlist.size() == num_tensors || tensorlist.size() == 0;
|
| 278 |
+
}));
|
| 279 |
+
|
| 280 |
+
for (const auto& tensor_index : c10::irange(num_tensors)) {
|
| 281 |
+
const auto key = [&]() -> DeviceDtypeKey {
|
| 282 |
+
const auto t = nested_tensorlist[0][tensor_index];
|
| 283 |
+
TORCH_CHECK(
|
| 284 |
+
t.has_value(),
|
| 285 |
+
"Tensors of the first list of nested Tensor lists are supposed to be defined but ",
|
| 286 |
+
"the ",
|
| 287 |
+
tensor_index,
|
| 288 |
+
"-th Tensor is not.");
|
| 289 |
+
return {t->device(), t->scalar_type()};
|
| 290 |
+
}();
|
| 291 |
+
TORCH_CHECK(
|
| 292 |
+
std::all_of(
|
| 293 |
+
nested_tensorlist.cbegin(),
|
| 294 |
+
nested_tensorlist.cend(),
|
| 295 |
+
[&](const auto& tensorlist) -> bool {
|
| 296 |
+
if (tensorlist.size() == 0) {
|
| 297 |
+
return true;
|
| 298 |
+
}
|
| 299 |
+
const auto& tensor = tensorlist[tensor_index];
|
| 300 |
+
// note(crcrpar): Currently the scope of this function is
|
| 301 |
+
// optimizers so there could be `state_steps` and other scalars
|
| 302 |
+
// whose elements are float tensors no matter what the parameter's
|
| 303 |
+
// dtype is.
|
| 304 |
+
if (!tensor.has_value()) {
|
| 305 |
+
return true;
|
| 306 |
+
} else {
|
| 307 |
+
const auto s = tensor->scalar_type();
|
| 308 |
+
const auto d = tensor->device();
|
| 309 |
+
// Note: `step` or `state_step` is float32 by default.
|
| 310 |
+
if (key.first == d) {
|
| 311 |
+
return key.second == s || s == at::ScalarType::Float;
|
| 312 |
+
} else if (d.is_cpu()) {
|
| 313 |
+
// note(crcrpar): There are some test cases (e.g.
|
| 314 |
+
// TestOptim::test_adam) where state_steps are on CPU and the
|
| 315 |
+
// others are on CUDA. Currently a state_step Tensor has the
|
| 316 |
+
// dtype of float.
|
| 317 |
+
return s == at::ScalarType::Float;
|
| 318 |
+
} else {
|
| 319 |
+
return false;
|
| 320 |
+
}
|
| 321 |
+
}
|
| 322 |
+
}),
|
| 323 |
+
"Tensors of the same index must be on the same device and the same dtype except `step` tensors that can be CPU and float32 notwithstanding");
|
| 324 |
+
if (!grouped_tensors_with_indices.count(key)) {
|
| 325 |
+
grouped_tensors_with_indices.insert(
|
| 326 |
+
{key,
|
| 327 |
+
TensorsAndIndicesT{
|
| 328 |
+
[&]() -> nested_optional_tensorvec_t {
|
| 329 |
+
nested_optional_tensorvec_t nested_tensorvec;
|
| 330 |
+
nested_tensorvec.reserve(num_lists);
|
| 331 |
+
for (const auto& i : c10::irange(num_lists)) {
|
| 332 |
+
std::vector<c10::optional<at::Tensor>> tensors;
|
| 333 |
+
if (!nested_tensorlist[i].empty()) {
|
| 334 |
+
// NB: num_tensors is the max possible length for any of
|
| 335 |
+
// the inner lists of tensor references. Reserving the max
|
| 336 |
+
// trades memory for perf. This should not have significant
|
| 337 |
+
// impact.
|
| 338 |
+
tensors.reserve(num_tensors);
|
| 339 |
+
}
|
| 340 |
+
nested_tensorvec.emplace_back(tensors);
|
| 341 |
+
}
|
| 342 |
+
return nested_tensorvec;
|
| 343 |
+
}(),
|
| 344 |
+
[&]() -> IndicesT {
|
| 345 |
+
if (!with_indices) {
|
| 346 |
+
return {};
|
| 347 |
+
} else {
|
| 348 |
+
IndicesT indices;
|
| 349 |
+
indices.reserve(num_tensors);
|
| 350 |
+
return indices;
|
| 351 |
+
}
|
| 352 |
+
}()}});
|
| 353 |
+
}
|
| 354 |
+
for (const auto& list_index : c10::irange(num_lists)) {
|
| 355 |
+
if (!nested_tensorlist[list_index].empty()) {
|
| 356 |
+
grouped_tensors_with_indices[key].first[list_index].emplace_back(
|
| 357 |
+
nested_tensorlist[list_index][tensor_index]);
|
| 358 |
+
}
|
| 359 |
+
}
|
| 360 |
+
if (with_indices) {
|
| 361 |
+
grouped_tensors_with_indices[key].second.emplace_back(tensor_index);
|
| 362 |
+
}
|
| 363 |
+
}
|
| 364 |
+
|
| 365 |
+
return grouped_tensors_with_indices;
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
} // namespace
|
| 369 |
+
} // namespace at::native
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/native/Histogram.h
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <ATen/native/DispatchStub.h>
|
| 5 |
+
|
| 6 |
+
namespace at::native {
|
| 7 |
+
|
| 8 |
+
using histogramdd_fn = void(*)(const Tensor&, const c10::optional<Tensor>&, bool, Tensor&, const TensorList&);
|
| 9 |
+
using histogramdd_linear_fn = void(*)(const Tensor&, const c10::optional<Tensor>&, bool, Tensor&, const TensorList&, bool);
|
| 10 |
+
using histogram_select_outer_bin_edges_fn = void(*)(const Tensor& input, const int64_t N, std::vector<double> &leftmost_edges, std::vector<double> &rightmost_edges);
|
| 11 |
+
|
| 12 |
+
DECLARE_DISPATCH(histogramdd_fn, histogramdd_stub);
|
| 13 |
+
DECLARE_DISPATCH(histogramdd_linear_fn, histogramdd_linear_stub);
|
| 14 |
+
DECLARE_DISPATCH(histogram_select_outer_bin_edges_fn, histogram_select_outer_bin_edges_stub);
|
| 15 |
+
|
| 16 |
+
} // namespace at::native
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/core/Tensor.h>
|
| 2 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
| 3 |
+
#include <ATen/core/op_registration/op_registration.h>
|
| 4 |
+
#include <ATen/native/UnaryOps.h>
|
| 5 |
+
#include <ATen/native/Resize.h>
|
| 6 |
+
#include <c10/util/irange.h>
|
| 7 |
+
#include <torch/library.h>
|
| 8 |
+
|
| 9 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 10 |
+
#include <ATen/Functions.h>
|
| 11 |
+
#else
|
| 12 |
+
#include <ATen/ops/clone.h>
|
| 13 |
+
|
| 14 |
+
#include <utility>
|
| 15 |
+
#endif
|
| 16 |
+
|
| 17 |
+
namespace at::native {
|
| 18 |
+
// This fallback should only be used for operations that are self inverse and have a corresponding tensor
|
| 19 |
+
// bit (internally implemented using DispatchKey) to maintain the state on tensor using tensor bit.
|
| 20 |
+
// Currently there are two tensor bits that trigger this fallback: conjugate bit and negative bit.
|
| 21 |
+
// Conjugate bit is set on a tensor when `.conj()` is called and neg bit is set on a tensor when `.conj().imag` is called.
|
| 22 |
+
|
| 23 |
+
// NOTE: To use this fallback, `clone` and `copy_` should fully understand and be able to correctly handle the semantic of your math bit.
|
| 24 |
+
struct MathOpFallback {
|
| 25 |
+
MathOpFallback(DispatchKey key_, string op_name_) : key(key_), op_name(std::move(op_name_)) {}
|
| 26 |
+
virtual bool is_bit_set(const Tensor&) = 0;
|
| 27 |
+
void fallback_impl(const c10::OperatorHandle& op, DispatchKeySet dispatch_keys, torch::jit::Stack* stack) {
|
| 28 |
+
/*
|
| 29 |
+
Situations to handle:
|
| 30 |
+
1. Out-of-place operation. Easy: materialize all inputs and
|
| 31 |
+
call it a day.
|
| 32 |
+
2. Inplace operation. Desugar x.add_(2) into x.conj_().add_(2).conj_().
|
| 33 |
+
Materialize other inputs as in (1).
|
| 34 |
+
3. out= operation. Desugar add(x, 2, out=y) into y.copy_(add(x, 2))
|
| 35 |
+
Materialize other inputs as in (1).
|
| 36 |
+
|
| 37 |
+
It is important to be able to tell if we READ from an argument and if we
|
| 38 |
+
WRITE to an argument. Conservative approach is to assume that we always
|
| 39 |
+
READ from an argument, but in out= operations you can skip
|
| 40 |
+
conjugating inputs on entry that never get used. In the current schema we
|
| 41 |
+
can't easily tell if the operation is in in-place or out= operation.
|
| 42 |
+
|
| 43 |
+
Note:
|
| 44 |
+
1. Mutable tensorlists containing tensors whose math bit set to true are disallowed.
|
| 45 |
+
2. Mutable tensors with math bit set to true are unconditionally cloned to ensure
|
| 46 |
+
correct behavior in the case when the mutable tensor shares memory with non mutable arguments.
|
| 47 |
+
|
| 48 |
+
If we were to in-place resolve the math bit for mutable inputs, then the non-mutable inputs sharing partial or full memory
|
| 49 |
+
with these mutable inputs would read into wrong values in the following cases:
|
| 50 |
+
1. Non mutable inputs have their math bit set to false.
|
| 51 |
+
2. Math bit for mutable input(s) is resolved before the non mutable inputs (with bit set to true and sharing memory
|
| 52 |
+
with one or more mutable arg(s)) are cloned.
|
| 53 |
+
At the end, the final value of the mutable arguments from the stack are copied into the original input mutable tensor inputs.
|
| 54 |
+
*/
|
| 55 |
+
const auto& arguments = op.schema().arguments();
|
| 56 |
+
const auto num_arguments = arguments.size();
|
| 57 |
+
const auto stack_start = stack->size() - num_arguments;
|
| 58 |
+
|
| 59 |
+
c10::optional<bool> is_write;
|
| 60 |
+
for (const auto i : c10::irange(num_arguments)) {
|
| 61 |
+
// Three possible states:
|
| 62 |
+
// 1. alias_info has no value --> out-of-place operation
|
| 63 |
+
// 2. alias_info does have a value, alias_info->is_write=True --> in-place or out= operation
|
| 64 |
+
// 3. alias_info does have a value, alias_info->is_write=False --> view operation
|
| 65 |
+
const AliasInfo* alias_info = arguments[i].alias_info();
|
| 66 |
+
if (alias_info != nullptr) {
|
| 67 |
+
if (is_write.has_value()) {
|
| 68 |
+
TORCH_CHECK(*is_write == alias_info->isWrite(),
|
| 69 |
+
"Unsupported operator for ", op_name, " fallback: ", op.schema().name(),
|
| 70 |
+
op_name, " fallback doesn't work for operators with a mix "
|
| 71 |
+
"mutable and non-mutable inputs that alias with outputs, "
|
| 72 |
+
"this must be implemented manually. "
|
| 73 |
+
"If you got this error on a core op, please report a bug to PyTorch.");
|
| 74 |
+
} else {
|
| 75 |
+
is_write = alias_info->isWrite();
|
| 76 |
+
}
|
| 77 |
+
}
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
if (is_write.has_value() && !*is_write) {
|
| 81 |
+
// We assume that view operators automatically handle the math bit
|
| 82 |
+
// correctly by propagating the dispatch key in key_set.
|
| 83 |
+
// This is not necessarily always right, so you should test these cases.
|
| 84 |
+
op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack);
|
| 85 |
+
return;
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
// Mutable inputs with math bit set to True and their clones
|
| 89 |
+
std::vector<std::pair<Tensor, Tensor>> mutable_inputs_with_their_clones;
|
| 90 |
+
for (const auto i : c10::irange(num_arguments)) {
|
| 91 |
+
auto& ivalue = (*stack)[stack_start + i];
|
| 92 |
+
if (!(ivalue.isTensor() || ivalue.isTensorList())) {
|
| 93 |
+
continue;
|
| 94 |
+
}
|
| 95 |
+
const auto& argument = arguments[i];
|
| 96 |
+
bool mut_arg = false;
|
| 97 |
+
if (argument.alias_info()) {
|
| 98 |
+
// Was already tested by is_write loop above
|
| 99 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(argument.alias_info()->isWrite());
|
| 100 |
+
mut_arg = true;
|
| 101 |
+
}
|
| 102 |
+
if (ivalue.isTensor()) {
|
| 103 |
+
if (!is_bit_set(ivalue.toTensor())) {
|
| 104 |
+
continue;
|
| 105 |
+
}
|
| 106 |
+
auto tensor = std::move(ivalue).toTensor();
|
| 107 |
+
auto resolved_tensor = at::clone(tensor);
|
| 108 |
+
if (mut_arg) {
|
| 109 |
+
TORCH_CHECK(mutable_inputs_with_their_clones.empty(), op_name, " fallback does not support operators with more than one mutable tensors with ",
|
| 110 |
+
op_name, "bit set to true.");
|
| 111 |
+
mutable_inputs_with_their_clones.emplace_back(std::move(tensor), resolved_tensor);
|
| 112 |
+
}
|
| 113 |
+
(*stack)[stack_start + i] = std::move(resolved_tensor);
|
| 114 |
+
} else if (ivalue.isTensorList()) {
|
| 115 |
+
auto tensors = std::move(ivalue).toTensorList();
|
| 116 |
+
for(const auto j : c10::irange(tensors.size())) {
|
| 117 |
+
const auto& tensor = tensors[j];
|
| 118 |
+
if (!is_bit_set(tensor)) {
|
| 119 |
+
continue;
|
| 120 |
+
}
|
| 121 |
+
TORCH_CHECK(!mut_arg, " fallback doesn't currently support mutable TensorLists with ",
|
| 122 |
+
op_name, " inputs. Please materialize all the ", op_name, " input tensor(s) in the mutable TensorList inputs before calling ",
|
| 123 |
+
op.schema().name());
|
| 124 |
+
tensors[j] = at::clone(tensor);
|
| 125 |
+
}
|
| 126 |
+
(*stack)[stack_start + i] = std::move(tensors);
|
| 127 |
+
}
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack);
|
| 131 |
+
|
| 132 |
+
TORCH_INTERNAL_ASSERT(mutable_inputs_with_their_clones.size() <= 1);
|
| 133 |
+
|
| 134 |
+
for (std::pair<Tensor, Tensor> mut_tensors: mutable_inputs_with_their_clones) {
|
| 135 |
+
auto& mutable_input = mut_tensors.first;
|
| 136 |
+
auto& cloned_mutable_input = mut_tensors.second;
|
| 137 |
+
auto& ivalue = (*stack)[stack_start];
|
| 138 |
+
auto returned_output = std::move(ivalue).toTensor();
|
| 139 |
+
|
| 140 |
+
// sanity check to ensure that the tensor in stack aliases the cloned_mutable_input
|
| 141 |
+
TORCH_INTERNAL_ASSERT(cloned_mutable_input.is_same(returned_output));
|
| 142 |
+
|
| 143 |
+
// necessary for out= arg
|
| 144 |
+
at::native::resize_output(mutable_input, returned_output.sizes());
|
| 145 |
+
|
| 146 |
+
mutable_input.copy_(returned_output);
|
| 147 |
+
(*stack)[stack_start] = std::move(mutable_input);
|
| 148 |
+
}
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
virtual ~MathOpFallback() = default;
|
| 152 |
+
|
| 153 |
+
DispatchKey key;
|
| 154 |
+
string op_name;
|
| 155 |
+
};
|
| 156 |
+
|
| 157 |
+
} // namespace at::native
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/TensorIterator.h>
|
| 4 |
+
#include <ATen/native/DispatchStub.h>
|
| 5 |
+
|
| 6 |
+
namespace at::native {
|
| 7 |
+
|
| 8 |
+
using renorm_scale_factor_fn = void (*) (TensorIteratorBase& iter, double maxnorm);
|
| 9 |
+
DECLARE_DISPATCH(renorm_scale_factor_fn, renorm_scale_factor_stub);
|
| 10 |
+
|
| 11 |
+
} // namespace at::native
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorCompare.h
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/native/DispatchStub.h>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
class Scalar;
|
| 7 |
+
}
|
| 8 |
+
|
| 9 |
+
namespace at {
|
| 10 |
+
class Tensor;
|
| 11 |
+
struct TensorIterator;
|
| 12 |
+
struct TensorIteratorBase;
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
namespace at::native {
|
| 16 |
+
|
| 17 |
+
using reduce_minmax_fn =
|
| 18 |
+
void (*)(Tensor&, Tensor&, const Tensor&, int64_t, bool);
|
| 19 |
+
using structured_reduce_minmax_fn =
|
| 20 |
+
void (*)(const Tensor&, const Tensor&, const Tensor&, int64_t, bool);
|
| 21 |
+
|
| 22 |
+
DECLARE_DISPATCH(structured_reduce_minmax_fn, max_stub);
|
| 23 |
+
DECLARE_DISPATCH(structured_reduce_minmax_fn, min_stub);
|
| 24 |
+
|
| 25 |
+
using where_fn = void (*)(TensorIterator &);
|
| 26 |
+
DECLARE_DISPATCH(where_fn, where_kernel);
|
| 27 |
+
|
| 28 |
+
using is_infinity_op_fn = void (*)(TensorIteratorBase &);
|
| 29 |
+
DECLARE_DISPATCH(is_infinity_op_fn, isposinf_stub);
|
| 30 |
+
DECLARE_DISPATCH(is_infinity_op_fn, isneginf_stub);
|
| 31 |
+
|
| 32 |
+
using mode_fn = void (*)(Tensor&, Tensor&, const Tensor&, int64_t, bool);
|
| 33 |
+
DECLARE_DISPATCH(mode_fn, mode_stub);
|
| 34 |
+
|
| 35 |
+
using clamp_tensor_fn = void (*)(TensorIteratorBase &);
|
| 36 |
+
DECLARE_DISPATCH(clamp_tensor_fn, clamp_stub);
|
| 37 |
+
|
| 38 |
+
namespace detail {
|
| 39 |
+
enum class ClampLimits {Min, Max, MinMax};
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
DECLARE_DISPATCH(void (*)(TensorIteratorBase &, const c10::Scalar&, const c10::Scalar&), clamp_scalar_stub);
|
| 43 |
+
DECLARE_DISPATCH(void (*)(TensorIteratorBase &, c10::Scalar), clamp_min_scalar_stub);
|
| 44 |
+
DECLARE_DISPATCH(void (*)(TensorIteratorBase &, c10::Scalar), clamp_max_scalar_stub);
|
| 45 |
+
|
| 46 |
+
using isin_default_fn = void (*)(const Tensor&, const Tensor&, bool, const Tensor&);
|
| 47 |
+
DECLARE_DISPATCH(isin_default_fn, isin_default_stub);
|
| 48 |
+
|
| 49 |
+
} // namespace at::native
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorIterator.h
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/TensorIterator.h>
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorTransformations.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/core/Tensor.h>
|
| 2 |
+
|
| 3 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 4 |
+
#include <ATen/Functions.h>
|
| 5 |
+
#else
|
| 6 |
+
#include <ATen/ops/roll.h>
|
| 7 |
+
#endif
|
| 8 |
+
|
| 9 |
+
#include <c10/util/Exception.h>
|
| 10 |
+
|
| 11 |
+
namespace at::native {
|
| 12 |
+
|
| 13 |
+
static inline Tensor roll_common(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
|
| 14 |
+
TORCH_CHECK(!shifts.empty(), "`shifts` required");
|
| 15 |
+
if (dims.empty() && shifts.size() == 1) {
|
| 16 |
+
auto flattened = self.contiguous().view(self.numel());
|
| 17 |
+
return roll(flattened, shifts[0], 0).view(self.sizes());
|
| 18 |
+
}
|
| 19 |
+
TORCH_CHECK(
|
| 20 |
+
shifts.size() == dims.size(),
|
| 21 |
+
"shifts and dimensions must align. shifts: ", shifts.size(), ", dims:", dims.size()
|
| 22 |
+
);
|
| 23 |
+
AT_ASSERT(dims.size() > 1);
|
| 24 |
+
auto tail_shifts = shifts.slice(1);
|
| 25 |
+
auto tail_dims = dims.slice(1);
|
| 26 |
+
auto first_dim_rolled = roll(self, shifts[0], dims[0]);
|
| 27 |
+
return at::roll(first_dim_rolled, tail_shifts, tail_dims);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
} // namespace at::native
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TypeProperties.h
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <ATen/core/IListRef.h>
|
| 5 |
+
|
| 6 |
+
namespace at::native {
|
| 7 |
+
|
| 8 |
+
struct ResultTypeState {
|
| 9 |
+
c10::ScalarType dimResult = ScalarType::Undefined;
|
| 10 |
+
c10::ScalarType wrappedResult = ScalarType::Undefined;
|
| 11 |
+
c10::ScalarType zeroResult = ScalarType::Undefined;
|
| 12 |
+
};
|
| 13 |
+
|
| 14 |
+
TORCH_API ResultTypeState update_result_type_state(const Tensor& tensor, const ResultTypeState& in_state);
|
| 15 |
+
TORCH_API ResultTypeState update_result_type_state(const Scalar& scalar, const ResultTypeState& in_state);
|
| 16 |
+
TORCH_API ScalarType result_type(const ResultTypeState& state);
|
| 17 |
+
|
| 18 |
+
TORCH_API ScalarType result_type(ITensorListRef tensors);
|
| 19 |
+
|
| 20 |
+
} // namespace at::native
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/native/layer_norm.h
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <ATen/native/DispatchStub.h>
|
| 5 |
+
#include <c10/util/accumulate.h>
|
| 6 |
+
|
| 7 |
+
namespace at::native {
|
| 8 |
+
|
| 9 |
+
namespace {
|
| 10 |
+
|
| 11 |
+
C10_ALWAYS_INLINE std::pair<int64_t, int64_t> _check_layer_norm_inputs(
|
| 12 |
+
const Tensor& input,
|
| 13 |
+
IntArrayRef normalized_shape,
|
| 14 |
+
const Tensor& weight /* optional */,
|
| 15 |
+
const Tensor& bias /* optional */) {
|
| 16 |
+
|
| 17 |
+
const int normalized_ndim = normalized_shape.size();
|
| 18 |
+
TORCH_CHECK(
|
| 19 |
+
normalized_ndim >= 1,
|
| 20 |
+
"Expected normalized_shape to be at least 1-dimensional, i.e., ",
|
| 21 |
+
"containing at least one element, but got normalized_shape = ",
|
| 22 |
+
normalized_shape);
|
| 23 |
+
TORCH_CHECK(
|
| 24 |
+
!weight.defined() || weight.sizes().equals(normalized_shape),
|
| 25 |
+
"Expected weight to be of same shape as normalized_shape, but got ",
|
| 26 |
+
"weight of shape ",
|
| 27 |
+
weight.sizes(),
|
| 28 |
+
" and normalized_shape = ",
|
| 29 |
+
normalized_shape);
|
| 30 |
+
TORCH_CHECK(
|
| 31 |
+
!bias.defined() || bias.sizes().equals(normalized_shape),
|
| 32 |
+
"Expected bias to be of same shape as normalized_shape, but got ",
|
| 33 |
+
"bias of shape ",
|
| 34 |
+
bias.sizes(),
|
| 35 |
+
" and normalized_shape = ",
|
| 36 |
+
normalized_shape);
|
| 37 |
+
|
| 38 |
+
const auto input_shape = input.sizes();
|
| 39 |
+
const auto input_ndim = input.dim();
|
| 40 |
+
|
| 41 |
+
if (input_ndim < normalized_ndim ||
|
| 42 |
+
!input_shape.slice(input_ndim - normalized_ndim)
|
| 43 |
+
.equals(normalized_shape)) {
|
| 44 |
+
std::stringstream ss;
|
| 45 |
+
ss << "Given normalized_shape=" << normalized_shape
|
| 46 |
+
<< ", expected input with shape [*";
|
| 47 |
+
for (auto size : normalized_shape) {
|
| 48 |
+
ss << ", " << size;
|
| 49 |
+
}
|
| 50 |
+
ss << "], but got input of size" << input_shape;
|
| 51 |
+
AT_ERROR(ss.str());
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
const int axis = input_ndim - normalized_ndim;
|
| 55 |
+
const int64_t M =
|
| 56 |
+
c10::multiply_integers(input_shape.cbegin(), input_shape.cbegin() + axis);
|
| 57 |
+
const int64_t N =
|
| 58 |
+
c10::multiply_integers(input_shape.cbegin() + axis, input_shape.cend());
|
| 59 |
+
|
| 60 |
+
return std::make_pair(M, N);
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
} // namespace
|
| 64 |
+
|
| 65 |
+
void layer_norm_cpu_out(
|
| 66 |
+
at::Tensor& out,
|
| 67 |
+
const at::Tensor& input,
|
| 68 |
+
const Tensor& gamma,
|
| 69 |
+
const Tensor& beta,
|
| 70 |
+
double eps,
|
| 71 |
+
int64_t M,
|
| 72 |
+
int64_t N);
|
| 73 |
+
|
| 74 |
+
using forward_fn = void (*)(
|
| 75 |
+
const Tensor& /* X */,
|
| 76 |
+
const Tensor& /* gamma */,
|
| 77 |
+
const Tensor& /* beta */,
|
| 78 |
+
int64_t /* M */,
|
| 79 |
+
int64_t /* N */,
|
| 80 |
+
double /* eps */,
|
| 81 |
+
Tensor* /* Y */,
|
| 82 |
+
Tensor* /* mean */,
|
| 83 |
+
Tensor* /* rstd */);
|
| 84 |
+
|
| 85 |
+
using backward_fn = void (*)(
|
| 86 |
+
const Tensor& /* dY */,
|
| 87 |
+
const Tensor& /* X */,
|
| 88 |
+
const Tensor& /* mean */,
|
| 89 |
+
const Tensor& /* rstd */,
|
| 90 |
+
const Tensor& /* gamma */,
|
| 91 |
+
int64_t /* M */,
|
| 92 |
+
int64_t /* N */,
|
| 93 |
+
Tensor* /* dX */,
|
| 94 |
+
Tensor* /* dgamma */,
|
| 95 |
+
Tensor* /* dbeta */);
|
| 96 |
+
|
| 97 |
+
DECLARE_DISPATCH(forward_fn, LayerNormKernel);
|
| 98 |
+
DECLARE_DISPATCH(backward_fn, LayerNormBackwardKernel);
|
| 99 |
+
|
| 100 |
+
} // namespace at::native
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/CachedTensorUtils.h>
|
| 4 |
+
#include <ATen/LegacyBatchedTensorImpl.h>
|
| 5 |
+
#include <ATen/TensorOperators.h>
|
| 6 |
+
#include <torch/csrc/Export.h>
|
| 7 |
+
#include <torch/csrc/autograd/function.h>
|
| 8 |
+
#include <torch/csrc/autograd/utils/grad_layout_contract.h>
|
| 9 |
+
#include <torch/csrc/autograd/variable.h>
|
| 10 |
+
|
| 11 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
| 12 |
+
#include <ATen/Functions.h>
|
| 13 |
+
#else
|
| 14 |
+
#include <ATen/ops/_sparse_coo_tensor_unsafe.h>
|
| 15 |
+
#endif
|
| 16 |
+
|
| 17 |
+
#include <mutex>
|
| 18 |
+
|
| 19 |
+
namespace torch {
|
| 20 |
+
namespace autograd {
|
| 21 |
+
|
| 22 |
+
#define CHECK_RESULT(RESULT, VAR) \
|
| 23 |
+
if (!(RESULT.is_sparse() || VAR.is_sparse() || RESULT.is_sparse_csr() || \
|
| 24 |
+
VAR.is_sparse_csr())) { \
|
| 25 |
+
if (!utils::obeys_layout_contract(RESULT, VAR)) { \
|
| 26 |
+
TORCH_WARN_ONCE( \
|
| 27 |
+
"grad and param do not obey the gradient layout contract. " \
|
| 28 |
+
"This is not an error, but may impair performance.\n" \
|
| 29 |
+
"grad.sizes() = ", \
|
| 30 |
+
RESULT.sizes(), \
|
| 31 |
+
", strides() = ", \
|
| 32 |
+
RESULT.strides(), \
|
| 33 |
+
"\n", \
|
| 34 |
+
"param.sizes() = ", \
|
| 35 |
+
VAR.sizes(), \
|
| 36 |
+
", strides() = ", \
|
| 37 |
+
VAR.strides()); \
|
| 38 |
+
} \
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
struct TORCH_API AccumulateGrad : public Node {
|
| 42 |
+
explicit AccumulateGrad(Variable variable_);
|
| 43 |
+
|
| 44 |
+
variable_list apply(variable_list&& grads) override;
|
| 45 |
+
|
| 46 |
+
std::vector<std::unique_ptr<FunctionPreHook>>& tensor_pre_hooks() noexcept
|
| 47 |
+
override {
|
| 48 |
+
// NB: Since the AccumulateGrad Node is only a weak ref from the Tensor,
|
| 49 |
+
// it can be destroyed even though the Tensor is still alive (contrary
|
| 50 |
+
// to all other Nodes). So we must lazily read the Tensor hooks here.
|
| 51 |
+
return impl::hooks(variable);
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
std::unique_ptr<PostAccumulateGradHook>& tensor_post_acc_grad_hooks() noexcept
|
| 55 |
+
override {
|
| 56 |
+
// NB: Since the AccumulateGrad Node is only a weak ref from the Tensor,
|
| 57 |
+
// it can be destroyed even though the Tensor is still alive (contrary
|
| 58 |
+
// to all other Nodes). So we must lazily read the Tensor hooks here.
|
| 59 |
+
return impl::post_acc_grad_hooks(variable);
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
// Given a variable with its current grad as variable_grad, accumulates
|
| 63 |
+
// new_grad into variable_grad if in place accumulation is possible.
|
| 64 |
+
// Otherwise, uses 'update_grad' to update the grad for the variable.
|
| 65 |
+
|
| 66 |
+
// "Gradient Layout Contract"
|
| 67 |
+
//
|
| 68 |
+
// AccumulateGrad tries to stash strided (non-sparse) grads with memory layout
|
| 69 |
+
// (strides) such that variables and grads interact efficiently in later
|
| 70 |
+
// optimizer kernels, and grads interact efficiently with c10d::Reducer.cpp.
|
| 71 |
+
//
|
| 72 |
+
// Specifically, AccumulateGrad tries to ensure the following
|
| 73 |
+
// (cf torch/csrc/autograd/utils/grad_layout_contract.h):
|
| 74 |
+
// (1) if variable.is_non_overlapping_and_dense(), the stashed grad's
|
| 75 |
+
// strides match variable.
|
| 76 |
+
// (2) else, stashed grad is rowmajor contiguous.
|
| 77 |
+
// If variable's grad does not exist (!variable_grad.defined())
|
| 78 |
+
// AccumulateGrad steals new_grad if it's stealable and obeys the contract
|
| 79 |
+
// already, otherwise it deep copies new_grad into an obedient clone.
|
| 80 |
+
//
|
| 81 |
+
// If variable's grad already exists (variable_grad.defined()), new_grad must
|
| 82 |
+
// be added to variable_grad. If we aren't setting up for double backward
|
| 83 |
+
// (!GradMode::is_enabled()), AccumulateGrad performs "variable_grad +=
|
| 84 |
+
// new_grad" in-place, which keeps variable_grad's layout. We assume (hope)
|
| 85 |
+
// variable_grad was created obeying (1) or (2) at some point in the past.
|
| 86 |
+
//
|
| 87 |
+
// If we are setting up for double backward, AccumulateGrad updates the grad
|
| 88 |
+
// out-of-place via "variable_grad + new_grad." TensorIterator operator+
|
| 89 |
+
// decides result's layout. Typically TensorIterator matches strides of the
|
| 90 |
+
// first arg, so we once again assume (hope) variable_grad was originally
|
| 91 |
+
// created obeying (1) or (2).
|
| 92 |
+
//
|
| 93 |
+
// AccumulateGrad does not enforce the contract with 100% certainty. Examples:
|
| 94 |
+
// - If a user manually permutes a param or its grad, then runs a fwd+bwd,
|
| 95 |
+
// variable_grad += new_grad keeps variable_grad's layout without
|
| 96 |
+
// rechecking the contract.
|
| 97 |
+
// - If TensorIterator changes its corner cases about operator+'s result
|
| 98 |
+
// (for example, giving more or less priority to channels_last inputs, see
|
| 99 |
+
// https://github.com/pytorch/pytorch/pull/37968) the result may not obey.
|
| 100 |
+
//
|
| 101 |
+
// Fortunately, if a given grad doesn't satisfy (1) or (2), the penalty is
|
| 102 |
+
// degraded performance in Reducer.cpp or optimizer kernels, not death by
|
| 103 |
+
// assert or silently bad numerics.
|
| 104 |
+
|
| 105 |
+
// variable: the variable whose grad we're accumulating.
|
| 106 |
+
// variable_grad: the current grad for the variable.
|
| 107 |
+
// new_grad: new grad we want to accumulate for the variable.
|
| 108 |
+
// num_expected_refs: the number of refs we expect to hold internally
|
| 109 |
+
// such that it is safe to avoid cloning the grad
|
| 110 |
+
// if use_count() of the grad is less than or equal
|
| 111 |
+
// to this value (in addition to post_hooks).
|
| 112 |
+
// update_grad: Function that is used to update grad for the variable.
|
| 113 |
+
// The argument to the function is a Tensor which
|
| 114 |
+
// is used to set a new value for the grad.
|
| 115 |
+
template <typename T>
|
| 116 |
+
static void accumulateGrad(
|
| 117 |
+
const Variable& variable,
|
| 118 |
+
at::Tensor& variable_grad,
|
| 119 |
+
const at::Tensor& new_grad,
|
| 120 |
+
size_t num_expected_refs,
|
| 121 |
+
const T& update_grad) {
|
| 122 |
+
if (!variable_grad.defined()) {
|
| 123 |
+
if (!GradMode::is_enabled() && !new_grad.is_sparse() &&
|
| 124 |
+
!new_grad.is_sparse_csr() &&
|
| 125 |
+
!(variable.is_sparse_csr() && new_grad.layout() == at::kStrided) &&
|
| 126 |
+
at::caching::adjusted_use_count(new_grad) <= num_expected_refs &&
|
| 127 |
+
(new_grad.is_mkldnn() ||
|
| 128 |
+
utils::obeys_layout_contract(new_grad, variable))) {
|
| 129 |
+
// we aren't setting up for double-backward
|
| 130 |
+
// not sparse
|
| 131 |
+
// no other user-visible tensor references new_grad
|
| 132 |
+
// new_grad obeys the "Gradient Layout Contract", there has a special
|
| 133 |
+
// case, For MKLDNN tensor, which is a opaque tensor, assuming it obeys
|
| 134 |
+
// layout_contract. Under these conditions, we can steal new_grad
|
| 135 |
+
// without a deep copy.
|
| 136 |
+
update_grad(new_grad.detach());
|
| 137 |
+
} else if (
|
| 138 |
+
!GradMode::is_enabled() && new_grad.is_sparse() &&
|
| 139 |
+
new_grad._indices().is_contiguous() &&
|
| 140 |
+
new_grad._values().is_contiguous() &&
|
| 141 |
+
// Use count for indices and values should always be <=1 since the
|
| 142 |
+
// SparseTensor should be the only one holding a reference to these.
|
| 143 |
+
new_grad._indices().use_count() <= 1 &&
|
| 144 |
+
new_grad._values().use_count() <= 1 &&
|
| 145 |
+
new_grad.use_count() <= num_expected_refs) {
|
| 146 |
+
// Can't detach sparse tensor (since metadata changes are not allowed
|
| 147 |
+
// after detach), so just create a new one for the grad which is a
|
| 148 |
+
// shallow copy. We need a shallow copy so that modifying the original
|
| 149 |
+
// grad tensor doesn't modify the grad we accumulate.
|
| 150 |
+
// We only skip clone if indices and values themselves are contiguous
|
| 151 |
+
// for backward compatibility reasons. Since without this optimization,
|
| 152 |
+
// earlier we would clone the entire SparseTensor which cloned indices
|
| 153 |
+
// and values.
|
| 154 |
+
// For details see https://github.com/pytorch/pytorch/issues/34375.
|
| 155 |
+
|
| 156 |
+
// No scenario where we expect this to be true currently
|
| 157 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 158 |
+
!at::caching::is_cached_tensor(new_grad._indices()) &&
|
| 159 |
+
!at::caching::is_cached_tensor(new_grad._values()) &&
|
| 160 |
+
!at::caching::is_cached_tensor(new_grad));
|
| 161 |
+
|
| 162 |
+
update_grad(at::_sparse_coo_tensor_unsafe(
|
| 163 |
+
new_grad._indices(),
|
| 164 |
+
new_grad._values(),
|
| 165 |
+
new_grad.sizes(),
|
| 166 |
+
new_grad.options()));
|
| 167 |
+
} else {
|
| 168 |
+
if (new_grad.is_sparse() || new_grad.is_sparse_csr() ||
|
| 169 |
+
new_grad.is_nested()) {
|
| 170 |
+
update_grad(new_grad.clone());
|
| 171 |
+
} else {
|
| 172 |
+
if (new_grad.is_mkldnn()) {
|
| 173 |
+
update_grad(new_grad.clone());
|
| 174 |
+
} else {
|
| 175 |
+
// Deep copies new_grad according to the "Gradient Layout Contract."
|
| 176 |
+
update_grad(utils::clone_obey_contract(new_grad, variable));
|
| 177 |
+
}
|
| 178 |
+
}
|
| 179 |
+
}
|
| 180 |
+
} else if (!GradMode::is_enabled()) {
|
| 181 |
+
// This case is not strictly necessary, but it makes the first-order only
|
| 182 |
+
// case slightly more efficient.
|
| 183 |
+
if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
|
| 184 |
+
// If `variable_grad` is sparse and `new_grad` is not sparse, their
|
| 185 |
+
// sum is not sparse, and we must change the TensorImpl type of
|
| 186 |
+
// `variable_grad` for it to store the result. However, changing the
|
| 187 |
+
// TensorImpl type of a tensor requires changing the tensor itself, and
|
| 188 |
+
// thus in this case we have to change the grad tensor.
|
| 189 |
+
auto result = new_grad + variable_grad;
|
| 190 |
+
CHECK_RESULT(result, variable);
|
| 191 |
+
update_grad(std::move(result));
|
| 192 |
+
} else if (!at::inplaceIsVmapCompatible(variable_grad, new_grad)) {
|
| 193 |
+
// Ideally we'd perform an in-place operation to avoid changing
|
| 194 |
+
// the grad tensor. However, if that's impossible because the grads
|
| 195 |
+
// are vmap-incompatible (See NOTE: [vmap-incompatible in-place
|
| 196 |
+
// operations]), then we just add them out-of-place.
|
| 197 |
+
auto result = variable_grad + new_grad;
|
| 198 |
+
CHECK_RESULT(result, variable);
|
| 199 |
+
update_grad(std::move(result));
|
| 200 |
+
} else {
|
| 201 |
+
// In this case we can avoid changing the grad tensor. There are three
|
| 202 |
+
// scenarios when we'll hit this case:
|
| 203 |
+
//
|
| 204 |
+
// 1. `variable_grad` is sparse, and `new_grad` is sparse.
|
| 205 |
+
// 2. `variable_grad` is dense, and `new_grad` is sparse.
|
| 206 |
+
// 3. `variable_grad` is dense, and `new_grad` is dense.
|
| 207 |
+
// 4. `variable_grad` is mkldnn, and `new_grad` is mkldnn.
|
| 208 |
+
//
|
| 209 |
+
// In all of these four cases, `variable_grad += new_grad` is a
|
| 210 |
+
// valid operation which adds `new_grad` to `variable_grad` in
|
| 211 |
+
// place. `variable_grad` is thus still referring to the same tensor
|
| 212 |
+
// after the operation.
|
| 213 |
+
// Also DistributedDataParallel(DDP) package relies on grad being
|
| 214 |
+
// mutated in place for saving peak memory usage. DDP will still
|
| 215 |
+
// work correctly if it is mutated out of place here, but DDP will
|
| 216 |
+
// maintain one extra copy of grad tensors in buffer and thus
|
| 217 |
+
// increase peak memory usage.
|
| 218 |
+
variable_grad += new_grad;
|
| 219 |
+
CHECK_RESULT(variable_grad, variable);
|
| 220 |
+
// ^ We could enforce the contract more aggressively here by writing:
|
| 221 |
+
// if (variable_grad.is_sparse() || new_grad.is_sparse()) {
|
| 222 |
+
// variable_grad += new_grad;
|
| 223 |
+
// } else if (obeys_layout_contract(variable_grad, variable)) {
|
| 224 |
+
// variable_grad += new_grad;
|
| 225 |
+
// } else {
|
| 226 |
+
// result = at::empty_strided(variable.sizes(), variable.strides(),
|
| 227 |
+
// variable.options().memory_format(c10::nullopt));
|
| 228 |
+
// update_grad(at::native::add_out(result, variable_grad,
|
| 229 |
+
// new_grad, 1.0);
|
| 230 |
+
// }
|
| 231 |
+
// However, that accumulation is sometimes in place and sometimes not,
|
| 232 |
+
// which may break user code.
|
| 233 |
+
}
|
| 234 |
+
} else {
|
| 235 |
+
at::Tensor result;
|
| 236 |
+
if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
|
| 237 |
+
// CPU backend throws an error on sparse + dense, so prefer dense +
|
| 238 |
+
// sparse here.
|
| 239 |
+
result = new_grad + variable_grad;
|
| 240 |
+
} else {
|
| 241 |
+
// Assumes operator+ result typically matches strides of first arg,
|
| 242 |
+
// and hopes variable_grad was originally created obeying layout
|
| 243 |
+
// contract.
|
| 244 |
+
result = variable_grad + new_grad;
|
| 245 |
+
}
|
| 246 |
+
CHECK_RESULT(result, variable);
|
| 247 |
+
update_grad(std::move(result));
|
| 248 |
+
// ^ We could enforce the contract more aggressively here by saying
|
| 249 |
+
// if (obeys_layout_contract(new_grad, variable)) {
|
| 250 |
+
// update_grad(new_grad + variable_grad);
|
| 251 |
+
// } else {
|
| 252 |
+
// update_grad(variable_grad + new_grad);
|
| 253 |
+
// }
|
| 254 |
+
// such that the stashed grad is likely to have the right strides if
|
| 255 |
+
// either variable_grad or new_grad already has the right strides.
|
| 256 |
+
// We could enforce the contract with certainty by saying
|
| 257 |
+
// auto result = variable_grad + new_grad (or vice versa), checking
|
| 258 |
+
// result's layout, and copying to an obedient clone if necessary before
|
| 259 |
+
// update_grad. The copy would require another gmem pass. We can't create
|
| 260 |
+
// empty result with the right layout then add_out into it with a single
|
| 261 |
+
// kernel, because GradMode is enabled in this branch, and add_out isn't
|
| 262 |
+
// differentiable. Maybe more trouble than it's worth.
|
| 263 |
+
}
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
void compiled_args(CompiledNodeArgs& args) override;
|
| 267 |
+
variable_list apply_with_saved(
|
| 268 |
+
const variable_list& inputs,
|
| 269 |
+
SwapSavedVariables& saved) override;
|
| 270 |
+
|
| 271 |
+
Variable variable;
|
| 272 |
+
};
|
| 273 |
+
|
| 274 |
+
#undef CHECK_RESULT
|
| 275 |
+
|
| 276 |
+
} // namespace autograd
|
| 277 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/irange.h>
|
| 4 |
+
#include <torch/csrc/Export.h>
|
| 5 |
+
#include <torch/csrc/autograd/function.h>
|
| 6 |
+
#include <torch/csrc/autograd/variable.h>
|
| 7 |
+
|
| 8 |
+
#include <memory>
|
| 9 |
+
#include <string>
|
| 10 |
+
#include <vector>
|
| 11 |
+
|
| 12 |
+
namespace torch {
|
| 13 |
+
namespace autograd {
|
| 14 |
+
|
| 15 |
+
struct TORCH_API Error : public Node {
|
| 16 |
+
Error(std::string msg, edge_list&& next_edges)
|
| 17 |
+
: Node(std::move(next_edges)), msg(std::move(msg)) {}
|
| 18 |
+
|
| 19 |
+
Error(std::string msg) : msg(std::move(msg)) {}
|
| 20 |
+
|
| 21 |
+
variable_list apply(variable_list&& inputs) override;
|
| 22 |
+
|
| 23 |
+
void compiled_args(CompiledNodeArgs& args) override;
|
| 24 |
+
variable_list apply_with_saved(
|
| 25 |
+
const variable_list& inputs,
|
| 26 |
+
SwapSavedVariables& saved) override;
|
| 27 |
+
|
| 28 |
+
std::string msg;
|
| 29 |
+
};
|
| 30 |
+
|
| 31 |
+
// We print grad_fn names in tensor printing. For functions with backward
|
| 32 |
+
// NYI, grad_fn=<Error> will be printed if we use Error, which is confusing. So
|
| 33 |
+
// special case with a new NotImplemented function here.
|
| 34 |
+
struct TORCH_API NotImplemented : public Error {
|
| 35 |
+
NotImplemented(const std::string& forward_fn, edge_list&& next_edges)
|
| 36 |
+
: Error(
|
| 37 |
+
"derivative for " + forward_fn + " is not implemented",
|
| 38 |
+
std::move(next_edges)) {}
|
| 39 |
+
|
| 40 |
+
NotImplemented(const std::string& forward_fn)
|
| 41 |
+
: Error("derivative for " + forward_fn + " is not implemented") {}
|
| 42 |
+
};
|
| 43 |
+
|
| 44 |
+
// Identity in forward, Error in backward. Used to implement
|
| 45 |
+
// @once_differentiable
|
| 46 |
+
struct TORCH_API DelayedError : public Node {
|
| 47 |
+
DelayedError(std::string msg, int64_t num_inputs) : msg(std::move(msg)) {
|
| 48 |
+
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
|
| 49 |
+
for (const auto i : c10::irange(num_inputs)) {
|
| 50 |
+
(void)i; // Suppress unused variable warning
|
| 51 |
+
add_input_metadata(Node::undefined_input());
|
| 52 |
+
}
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
variable_list apply(variable_list&& inputs) override;
|
| 56 |
+
|
| 57 |
+
std::string msg;
|
| 58 |
+
};
|
| 59 |
+
|
| 60 |
+
struct TORCH_API UndefinedGrad : public Node {
|
| 61 |
+
UndefinedGrad() {
|
| 62 |
+
add_input_metadata(Node::undefined_input());
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
variable_list apply(variable_list&& inputs) override;
|
| 66 |
+
};
|
| 67 |
+
|
| 68 |
+
struct TORCH_API UndefinedGradBackward : public Node {
|
| 69 |
+
UndefinedGradBackward(edge_list&& next_edges) : Node(std::move(next_edges)) {}
|
| 70 |
+
|
| 71 |
+
UndefinedGradBackward() = default;
|
| 72 |
+
|
| 73 |
+
variable_list apply(variable_list&& inputs) override;
|
| 74 |
+
|
| 75 |
+
void compiled_args(CompiledNodeArgs& args) override {}
|
| 76 |
+
variable_list apply_with_saved(
|
| 77 |
+
const variable_list& inputs,
|
| 78 |
+
SwapSavedVariables& saved) override {
|
| 79 |
+
return apply(variable_list(inputs));
|
| 80 |
+
}
|
| 81 |
+
};
|
| 82 |
+
|
| 83 |
+
struct TORCH_API GraphRoot : public Node {
|
| 84 |
+
GraphRoot(edge_list functions, variable_list inputs)
|
| 85 |
+
: Node(std::move(functions)), outputs(std::move(inputs)) {
|
| 86 |
+
// Ensures calls to stream() on a GraphRoot instance reflect current
|
| 87 |
+
// stream(s) on devices of root grad tensors at the time the instance is
|
| 88 |
+
// constructed.
|
| 89 |
+
for (const auto& t : outputs) {
|
| 90 |
+
add_input_metadata(t);
|
| 91 |
+
}
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
variable_list apply(variable_list&& inputs) override {
|
| 95 |
+
return outputs;
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
void compiled_args(CompiledNodeArgs& args) override;
|
| 99 |
+
variable_list apply_with_saved(
|
| 100 |
+
const variable_list& inputs,
|
| 101 |
+
SwapSavedVariables& saved) override;
|
| 102 |
+
|
| 103 |
+
variable_list outputs;
|
| 104 |
+
};
|
| 105 |
+
|
| 106 |
+
struct TORCH_API Identity : public Node {
|
| 107 |
+
variable_list apply(variable_list&& inputs) override;
|
| 108 |
+
};
|
| 109 |
+
|
| 110 |
+
} // namespace autograd
|
| 111 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/Export.h>
|
| 4 |
+
#include <torch/csrc/autograd/function.h>
|
| 5 |
+
#include <torch/csrc/autograd/variable.h>
|
| 6 |
+
|
| 7 |
+
#include <ATen/ATen.h>
|
| 8 |
+
#include <c10/cuda/CUDAStream.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
|
| 11 |
+
#include <cstddef>
|
| 12 |
+
#include <vector>
|
| 13 |
+
|
| 14 |
+
namespace torch {
|
| 15 |
+
namespace autograd {
|
| 16 |
+
|
| 17 |
+
struct TORCH_CUDA_CU_API Scatter : public Node {
|
| 18 |
+
explicit Scatter(
|
| 19 |
+
std::vector<at::Device> devices,
|
| 20 |
+
c10::optional<std::vector<int64_t>> chunk_sizes = c10::nullopt,
|
| 21 |
+
int64_t dim = 0,
|
| 22 |
+
c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams =
|
| 23 |
+
c10::nullopt,
|
| 24 |
+
bool unsqueeze_scalars = false);
|
| 25 |
+
~Scatter() override;
|
| 26 |
+
|
| 27 |
+
variable_list apply(variable_list&& inputs) override;
|
| 28 |
+
|
| 29 |
+
std::vector<at::Device> devices_;
|
| 30 |
+
c10::optional<std::vector<int64_t>> chunk_sizes_;
|
| 31 |
+
int64_t dim_;
|
| 32 |
+
c10::optional<std::vector<c10::optional<at::cuda::CUDAStream>>> streams_;
|
| 33 |
+
bool unsqueeze_scalars_;
|
| 34 |
+
};
|
| 35 |
+
|
| 36 |
+
struct TORCH_CUDA_CU_API Gather : public Node {
|
| 37 |
+
explicit Gather(const at::Device& destination_device, int64_t dim = 0);
|
| 38 |
+
~Gather() override;
|
| 39 |
+
|
| 40 |
+
variable_list apply(variable_list&& inputs) override;
|
| 41 |
+
|
| 42 |
+
at::Device destination_device_;
|
| 43 |
+
int64_t dim_;
|
| 44 |
+
};
|
| 45 |
+
|
| 46 |
+
} // namespace autograd
|
| 47 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <pybind11/pybind11.h>
|
| 4 |
+
#include <pybind11/stl.h>
|
| 5 |
+
#include <torch/csrc/python_headers.h>
|
| 6 |
+
#include <torch/csrc/utils/pybind.h>
|
| 7 |
+
|
| 8 |
+
#include <torch/csrc/autograd/python_cpp_function.h>
|
| 9 |
+
#include <torch/csrc/autograd/python_function.h>
|
| 10 |
+
|
| 11 |
+
namespace py = pybind11;
|
| 12 |
+
|
| 13 |
+
namespace pybind11 {
|
| 14 |
+
namespace detail {}
|
| 15 |
+
} // namespace pybind11
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/Export.h>
|
| 4 |
+
#include <torch/csrc/autograd/function.h>
|
| 5 |
+
#include <torch/csrc/autograd/variable.h>
|
| 6 |
+
|
| 7 |
+
#include <ATen/TensorGeometry.h>
|
| 8 |
+
#include <ATen/core/DeprecatedTypeProperties.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
|
| 11 |
+
#include <cstdint>
|
| 12 |
+
#include <memory>
|
| 13 |
+
|
| 14 |
+
namespace torch {
|
| 15 |
+
namespace autograd {
|
| 16 |
+
|
| 17 |
+
struct TORCH_API CopyBackwards : public Node {
|
| 18 |
+
variable_list apply(variable_list&& grads) override;
|
| 19 |
+
void compiled_args(CompiledNodeArgs& args) override;
|
| 20 |
+
variable_list apply_with_saved(
|
| 21 |
+
const variable_list& inputs,
|
| 22 |
+
SwapSavedVariables& saved) override;
|
| 23 |
+
|
| 24 |
+
at::TensorOptions src_options;
|
| 25 |
+
};
|
| 26 |
+
|
| 27 |
+
// Note [View + Inplace update for base tensor]
|
| 28 |
+
//
|
| 29 |
+
// This note covers a few important topics related to view + inplace handling.
|
| 30 |
+
// - It explains what is the CopySlices Node and why we need it.
|
| 31 |
+
// - It explains the considerations on what is saved for backward in
|
| 32 |
+
// CopySlices.
|
| 33 |
+
// - It explains why we need to sometimes change the exec_info of the current
|
| 34 |
+
// backward
|
| 35 |
+
//
|
| 36 |
+
// What is CopySlices?
|
| 37 |
+
// ~~~~~~~~~~~~~~~~~~~
|
| 38 |
+
//
|
| 39 |
+
// We support autograd with inplace mutation; e.g., if you write x.mul_(2)
|
| 40 |
+
// the autograd will work as if you now had multiple Tensors under the hood and
|
| 41 |
+
// you did
|
| 42 |
+
// x = t.clone()
|
| 43 |
+
// x0 = x
|
| 44 |
+
// x1 = x0 * 2
|
| 45 |
+
// x = x1
|
| 46 |
+
// As you can see here, after this operation, x.grad_fn now points to x1.grad_fn
|
| 47 |
+
// (the MulBackward node) and this node points to x's original grad_fn (which is
|
| 48 |
+
// also x0.grad_fn). It is important to keep in mind that after the inplace,
|
| 49 |
+
// there is no Tensor object that represents the x0 state anymore. But the graph
|
| 50 |
+
// for it is still around in autograd (in case x was used before being modified
|
| 51 |
+
// inplace). See Example 1 in
|
| 52 |
+
// https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE
|
| 53 |
+
// We call this rebasing the history of the Tensor.
|
| 54 |
+
//
|
| 55 |
+
// Now, a difficult situation is what happens if x is a differentiable view
|
| 56 |
+
// of a base b.
|
| 57 |
+
// b = t.clone()
|
| 58 |
+
// x = b.select(0, 0)
|
| 59 |
+
// x *= 2
|
| 60 |
+
// With the same approach as above, this will become
|
| 61 |
+
// b = t.clone()
|
| 62 |
+
// x = b.select(0, 0)
|
| 63 |
+
// b0 = b
|
| 64 |
+
// x0 = x
|
| 65 |
+
// x1 = x0 * 2
|
| 66 |
+
// b1 = b0.select_scatter(x1, 0, 0)
|
| 67 |
+
// x2 = b1.select(0, 0)
|
| 68 |
+
// x = x2
|
| 69 |
+
// b = b1
|
| 70 |
+
// As you can see here, not only we need to modify x's grad_fn, we also need to
|
| 71 |
+
// modify the one from b. We also need to ensure that the new grad_fn on x is
|
| 72 |
+
// linked to b's new grad_fn. The chain the select_scatter, multiplication and
|
| 73 |
+
// select is what CopySlices does, all wrapped into a single Node.
|
| 74 |
+
//
|
| 75 |
+
// See Example 1 in
|
| 76 |
+
// https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE
|
| 77 |
+
//
|
| 78 |
+
// What do we need to save in CopySlices to run backward?
|
| 79 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 80 |
+
//
|
| 81 |
+
// We need to perform grad_view = fn(grad_view), but out-of-place.
|
| 82 |
+
// view_fn_ is an optional lambda function saved in DifferentiableViewMeta
|
| 83 |
+
// from forward pass, so that we can recover we when as_strided is not
|
| 84 |
+
// supported. It preserves the invariants:
|
| 85 |
+
// view = view_fn_(base)
|
| 86 |
+
// grad_view = view_fn_(grad_base)
|
| 87 |
+
//
|
| 88 |
+
// When as_strided is supported (e.g. strided CPU/CUDA Tensors), view_fn_
|
| 89 |
+
// is empty and we save TensorGeometry(view) instead.
|
| 90 |
+
// With the TensorGeometry information we can use `as_strided` call which
|
| 91 |
+
// is more efficient to recover views in backward.
|
| 92 |
+
//
|
| 93 |
+
// For example:
|
| 94 |
+
// view_1 = view_op_1(base)
|
| 95 |
+
// view_2 = view_op_2(view_1)
|
| 96 |
+
// ...
|
| 97 |
+
// view_n = view_op_n(view_n-1)
|
| 98 |
+
// view_n = inplace_op(view_n)
|
| 99 |
+
//
|
| 100 |
+
// In CPU/CUDA case where we support efficient as_strided implementation,
|
| 101 |
+
// grad_view_n can be calculated through 1 step.
|
| 102 |
+
//
|
| 103 |
+
// grad_view_n = grad_base.as_strided(view_sizes, view_strides, view_offset);
|
| 104 |
+
//
|
| 105 |
+
// But in XLA backend where we don't have full support of as_strided,
|
| 106 |
+
// it has to save a chained lambda function view_fn_, to exactly
|
| 107 |
+
// replay how the view was done in forward.
|
| 108 |
+
//
|
| 109 |
+
// view_fn_ = view_op_n(...(view_op_2(view_op_1())))
|
| 110 |
+
// grad_view_n = view_fn_(grad_base)
|
| 111 |
+
//
|
| 112 |
+
// This chain view_fn_ works as long as forward view ops are implemented,
|
| 113 |
+
// e.g XLA simulates view without a real Storage behind Tensor, but it's less
|
| 114 |
+
// efficient than the as_strided one so we should be careful to only use it when
|
| 115 |
+
// necessary.
|
| 116 |
+
//
|
| 117 |
+
// - For CPU/CUDA we save TensorGeometry of both base and view tensors,
|
| 118 |
+
// That's all we need to pass into as_strided.
|
| 119 |
+
// E.g. int[] sizes, int[] strides, and int storage_offset.
|
| 120 |
+
// - For XLA we use view_fn_, which captures all forward view op arguments
|
| 121 |
+
// by **value**.
|
| 122 |
+
// E.g for at::narrow, int dim, int start, in length are saved.
|
| 123 |
+
//
|
| 124 |
+
// Theoretically we could also save Tensor `view` in CopySlices Node, but
|
| 125 |
+
// it's far more expensive than what we currently save.
|
| 126 |
+
// 1. We cannot afford keeping large tensors alive to recover views only.
|
| 127 |
+
// 2. There are inplace checks when Tensors are loaded back to make sure
|
| 128 |
+
// they haven't been changed (including size metadata).
|
| 129 |
+
// So saving metadata like TensorGeometry/view arguments is much better
|
| 130 |
+
// because it is minimal information needed to recover views, as well as it
|
| 131 |
+
// allows the user to modify the original Tensor without preventing the
|
| 132 |
+
// backward pass from running.
|
| 133 |
+
//
|
| 134 |
+
// Why do we manually change exec_info in the apply?
|
| 135 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 136 |
+
//
|
| 137 |
+
// Using the same example as before,
|
| 138 |
+
// b = t.clone()
|
| 139 |
+
// x = b.select(0, 0)
|
| 140 |
+
// x *= y
|
| 141 |
+
//
|
| 142 |
+
// You can see the visualization at
|
| 143 |
+
// https://docs.google.com/drawings/d/1Bx-Hcz-zlIv7PabQqnPhUIVIs9F8WWi48svqMsAUMFs
|
| 144 |
+
// which contains the wrapped MulBackward Node and show what it links to.
|
| 145 |
+
// Since a backward can happen between any subset of the inputs (t and y) and
|
| 146 |
+
// outputs (o, x, b). It is possible to get into a state where CopySlices's 0th
|
| 147 |
+
// next function (CloneBackward) needs gradient but MulBackward's 0th next
|
| 148 |
+
// function (SelectBackward) is not. This happens if you do autograd.grad
|
| 149 |
+
// between x and t for example.
|
| 150 |
+
// In such a case, we do need to mark SelectBackward as requiring gradient such
|
| 151 |
+
// that, during the execution of MulBackward, we will actually compute gradient
|
| 152 |
+
// for the 0th input.
|
| 153 |
+
//
|
| 154 |
+
// All the other next functions are always shared (this is asserted in the apply
|
| 155 |
+
// code) and so nothing needs to be done for them.
|
| 156 |
+
|
| 157 |
+
// See Note [View + Inplace update for view tensor] for what we do to view
|
| 158 |
+
// tensor when an in-place operation happens.
|
| 159 |
+
struct TORCH_API CopySlices : public Node {
|
| 160 |
+
CopySlices(
|
| 161 |
+
const Variable& base_var,
|
| 162 |
+
at::TensorGeometry view_,
|
| 163 |
+
std::function<at::Tensor(const at::Tensor&)> view_fn_,
|
| 164 |
+
std::shared_ptr<Node> fn_);
|
| 165 |
+
|
| 166 |
+
// common code between apply/apply_with_saved
|
| 167 |
+
template <typename T>
|
| 168 |
+
variable_list apply_impl(variable_list&& inputs, const T& call_fn);
|
| 169 |
+
|
| 170 |
+
variable_list apply(variable_list&& inputs) override;
|
| 171 |
+
void release_variables() override;
|
| 172 |
+
void compiled_args(CompiledNodeArgs& args) override;
|
| 173 |
+
variable_list apply_with_saved(
|
| 174 |
+
const variable_list& inputs,
|
| 175 |
+
SwapSavedVariables& saved) override;
|
| 176 |
+
|
| 177 |
+
at::TensorGeometry base;
|
| 178 |
+
// view and view_fn are redundant and view_fn will be used if available.
|
| 179 |
+
// See Note [View + Inplace update for base tensor] for details.
|
| 180 |
+
at::TensorGeometry view;
|
| 181 |
+
std::function<at::Tensor(const at::Tensor&)> view_fn;
|
| 182 |
+
std::shared_ptr<Node> fn;
|
| 183 |
+
};
|
| 184 |
+
|
| 185 |
+
} // namespace autograd
|
| 186 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/Export.h>
|
| 4 |
+
#include <torch/csrc/autograd/InferenceMode.h>
|
| 5 |
+
#include <torch/csrc/autograd/autograd.h>
|
| 6 |
+
#include <torch/csrc/autograd/function.h>
|
| 7 |
+
#include <torch/csrc/autograd/variable.h>
|
| 8 |
+
#include <torch/csrc/utils/variadic.h>
|
| 9 |
+
|
| 10 |
+
#include <ATen/core/Tensor.h>
|
| 11 |
+
|
| 12 |
+
#include <functional>
|
| 13 |
+
#include <memory>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
namespace torch {
|
| 17 |
+
namespace autograd {
|
| 18 |
+
|
| 19 |
+
using function_constructor = std::function<std::shared_ptr<Node>(edge_list&&)>;
|
| 20 |
+
|
| 21 |
+
/**
|
| 22 |
+
* Wraps the tensor outputs in variables and creates the grad_fn and sets the
|
| 23 |
+
* grad_fn if necessary.
|
| 24 |
+
*/
|
| 25 |
+
TORCH_API variable_list wrap_outputs(
|
| 26 |
+
const variable_list& inputs,
|
| 27 |
+
tensor_list&& outputs,
|
| 28 |
+
const function_constructor& ctr);
|
| 29 |
+
|
| 30 |
+
/// Checks that inputs contains exactly `args` items and that the first
|
| 31 |
+
/// `required_args`
|
| 32 |
+
/// items are not nullptr. If not specified, `required_args` defaults to `args`.
|
| 33 |
+
TORCH_API void check_input_variables(
|
| 34 |
+
const char* name,
|
| 35 |
+
const variable_list& inputs,
|
| 36 |
+
int args,
|
| 37 |
+
int required_args = -1,
|
| 38 |
+
bool allow_undefined = false);
|
| 39 |
+
|
| 40 |
+
struct ComputeRequiresGrad : IterArgs<ComputeRequiresGrad> {
|
| 41 |
+
bool out = false;
|
| 42 |
+
using IterArgs<ComputeRequiresGrad>::operator();
|
| 43 |
+
void operator()(const at::Tensor& tensor) {
|
| 44 |
+
const auto& var = static_cast<const Variable&>(tensor);
|
| 45 |
+
if (var.defined() && var.requires_grad()) {
|
| 46 |
+
out = true;
|
| 47 |
+
}
|
| 48 |
+
}
|
| 49 |
+
void operator()(const c10::optional<at::Tensor>& tensor) {
|
| 50 |
+
if (tensor.has_value()) {
|
| 51 |
+
(*this)(*tensor);
|
| 52 |
+
}
|
| 53 |
+
}
|
| 54 |
+
bool short_circuit() {
|
| 55 |
+
return out;
|
| 56 |
+
}
|
| 57 |
+
};
|
| 58 |
+
|
| 59 |
+
template <typename... Args>
|
| 60 |
+
inline bool compute_requires_grad(Args&&... args) {
|
| 61 |
+
if (!GradMode::is_enabled()) {
|
| 62 |
+
return false;
|
| 63 |
+
}
|
| 64 |
+
return ComputeRequiresGrad().apply(std::forward<Args>(args)...).out;
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
inline void set_history(
|
| 68 |
+
at::Tensor& variable,
|
| 69 |
+
const std::shared_ptr<Node>& grad_fn) {
|
| 70 |
+
TORCH_CHECK(grad_fn != nullptr);
|
| 71 |
+
if (variable.defined()) {
|
| 72 |
+
// If the codegen triggers this, you most likely want to add your newly
|
| 73 |
+
// added function to the DONT_REQUIRE_DERIVATIVE list in
|
| 74 |
+
// tools/autograd/gen_variable_type.py
|
| 75 |
+
TORCH_INTERNAL_ASSERT(isDifferentiableType(variable.scalar_type()));
|
| 76 |
+
auto output_nr = grad_fn->add_input_metadata(variable);
|
| 77 |
+
impl::set_gradient_edge(variable, {grad_fn, output_nr});
|
| 78 |
+
} else {
|
| 79 |
+
grad_fn->add_input_metadata(Node::undefined_input());
|
| 80 |
+
}
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
inline void set_history(
|
| 84 |
+
std::vector<Variable>&& variables,
|
| 85 |
+
const std::shared_ptr<Node>& grad_fn) {
|
| 86 |
+
for (auto& variable : variables) {
|
| 87 |
+
set_history(variable, grad_fn);
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
inline void set_history(
|
| 92 |
+
std::vector<Variable>& variables,
|
| 93 |
+
const std::shared_ptr<Node>& grad_fn) {
|
| 94 |
+
for (auto& variable : variables) {
|
| 95 |
+
set_history(variable, grad_fn);
|
| 96 |
+
}
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
inline bool isFwGradDefined(const c10::optional<at::Tensor>& t) {
|
| 100 |
+
return t.has_value() && t->defined() && t->_fw_grad(/*level */ 0).defined();
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
inline bool isFwGradDefinedTensorList(const at::ITensorListRef& variables) {
|
| 104 |
+
bool ret = false;
|
| 105 |
+
for (auto& variable : variables) {
|
| 106 |
+
ret |= isFwGradDefined(variable);
|
| 107 |
+
}
|
| 108 |
+
return ret;
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
inline bool isFwGradDefinedTensorList(
|
| 112 |
+
const c10::List<c10::optional<at::Tensor>>& li) {
|
| 113 |
+
bool ret = false;
|
| 114 |
+
for (auto i : c10::irange(li.size())) {
|
| 115 |
+
auto t = li.get(i);
|
| 116 |
+
ret |= (t.has_value() && isFwGradDefined(t.value()));
|
| 117 |
+
}
|
| 118 |
+
return ret;
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
} // namespace autograd
|
| 122 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated from ../tools/autograd/templates/VariableType.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/core/Tensor.h>
|
| 6 |
+
#include <ATen/Context.h>
|
| 7 |
+
|
| 8 |
+
#include <c10/util/intrusive_ptr.h>
|
| 9 |
+
|
| 10 |
+
#include <torch/csrc/Export.h>
|
| 11 |
+
#include <torch/csrc/autograd/autograd_not_implemented_fallback.h>
|
| 12 |
+
|
| 13 |
+
#include <cstdint> // for size_t
|
| 14 |
+
#include <functional> // for function
|
| 15 |
+
#include <memory> // for unique_ptr
|
| 16 |
+
#include <string>
|
| 17 |
+
#include <vector>
|
| 18 |
+
|
| 19 |
+
namespace at {
|
| 20 |
+
struct Quantizer;
|
| 21 |
+
};
|
| 22 |
+
|
| 23 |
+
namespace torch { namespace autograd {
|
| 24 |
+
|
| 25 |
+
using Variable = at::Tensor;
|
| 26 |
+
using at::Context;
|
| 27 |
+
using at::Device;
|
| 28 |
+
using at::Dimname;
|
| 29 |
+
using at::DimnameList;
|
| 30 |
+
using at::Generator;
|
| 31 |
+
using at::IntArrayRef;
|
| 32 |
+
using at::MemoryFormat;
|
| 33 |
+
using at::QScheme;
|
| 34 |
+
using at::Scalar;
|
| 35 |
+
using at::ScalarType;
|
| 36 |
+
using at::Storage;
|
| 37 |
+
using at::Tensor;
|
| 38 |
+
using at::TensorList;
|
| 39 |
+
using at::TensorOptions;
|
| 40 |
+
using at::Quantizer;
|
| 41 |
+
// This is temporary typedef to enable Quantizer in aten native function API
|
| 42 |
+
// we'll remove them when we are actually exposing Quantizer class
|
| 43 |
+
// to frontend
|
| 44 |
+
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
|
| 45 |
+
using c10::optional;
|
| 46 |
+
|
| 47 |
+
namespace VariableType {
|
| 48 |
+
TORCH_API std::vector<at::DeprecatedTypeProperties*> allCUDATypes();
|
| 49 |
+
TORCH_API std::vector<at::DeprecatedTypeProperties*> allXPUTypes();
|
| 50 |
+
TORCH_API std::vector<at::DeprecatedTypeProperties*> allCPUTypes();
|
| 51 |
+
|
| 52 |
+
at::Tensor & unpack(Tensor & t, const char * name, int pos);
|
| 53 |
+
const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
|
| 54 |
+
at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
|
| 55 |
+
std::vector<at::Tensor> unpack(const at::ITensorListRef& tl, const char *name, int pos);
|
| 56 |
+
};
|
| 57 |
+
|
| 58 |
+
}} // namespace torch::autograd
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <Python.h>
|
| 4 |
+
|
| 5 |
+
// @generated from ../tools/autograd/templates/python_functions.h
|
| 6 |
+
|
| 7 |
+
// Python bindings for automatically generated autograd functions
|
| 8 |
+
|
| 9 |
+
namespace torch { namespace autograd { namespace generated {
|
| 10 |
+
|
| 11 |
+
void initialize_autogenerated_functions_0(PyObject* module);
|
| 12 |
+
void initialize_autogenerated_functions_1(PyObject* module);
|
| 13 |
+
void initialize_autogenerated_functions_2(PyObject* module);
|
| 14 |
+
void initialize_autogenerated_functions_3(PyObject* module);
|
| 15 |
+
void initialize_autogenerated_functions_4(PyObject* module);
|
| 16 |
+
|
| 17 |
+
inline void initialize_autogenerated_functions(PyObject* module) {
|
| 18 |
+
initialize_autogenerated_functions_0(module);
|
| 19 |
+
initialize_autogenerated_functions_1(module);
|
| 20 |
+
initialize_autogenerated_functions_2(module);
|
| 21 |
+
initialize_autogenerated_functions_3(module);
|
| 22 |
+
initialize_autogenerated_functions_4(module);
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
}}} // namespace torch::autograd::generated
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
namespace torch {
|
| 4 |
+
namespace autograd {
|
| 5 |
+
namespace generated {
|
| 6 |
+
|
| 7 |
+
PyTypeObject* get__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_namedtuple();
|
| 8 |
+
PyTypeObject* get__fused_moving_avg_obs_fq_helper_namedtuple();
|
| 9 |
+
PyTypeObject* get__linalg_det_namedtuple();
|
| 10 |
+
PyTypeObject* get__linalg_det_out_namedtuple();
|
| 11 |
+
PyTypeObject* get__linalg_eigh_namedtuple();
|
| 12 |
+
PyTypeObject* get__linalg_eigh_out_namedtuple();
|
| 13 |
+
PyTypeObject* get__linalg_slogdet_namedtuple();
|
| 14 |
+
PyTypeObject* get__linalg_slogdet_out_namedtuple();
|
| 15 |
+
PyTypeObject* get__linalg_solve_ex_namedtuple();
|
| 16 |
+
PyTypeObject* get__linalg_solve_ex_out_namedtuple();
|
| 17 |
+
PyTypeObject* get__linalg_svd_namedtuple();
|
| 18 |
+
PyTypeObject* get__linalg_svd_out_namedtuple();
|
| 19 |
+
PyTypeObject* get__lu_with_info_namedtuple();
|
| 20 |
+
PyTypeObject* get__scaled_dot_product_efficient_attention_namedtuple();
|
| 21 |
+
PyTypeObject* get__scaled_dot_product_flash_attention_namedtuple();
|
| 22 |
+
PyTypeObject* get__unpack_dual_namedtuple();
|
| 23 |
+
PyTypeObject* get_aminmax_namedtuple();
|
| 24 |
+
PyTypeObject* get_aminmax_out_namedtuple();
|
| 25 |
+
PyTypeObject* get_cummax_namedtuple();
|
| 26 |
+
PyTypeObject* get_cummax_out_namedtuple();
|
| 27 |
+
PyTypeObject* get_cummin_namedtuple();
|
| 28 |
+
PyTypeObject* get_cummin_out_namedtuple();
|
| 29 |
+
PyTypeObject* get_frexp_namedtuple();
|
| 30 |
+
PyTypeObject* get_frexp_out_namedtuple();
|
| 31 |
+
PyTypeObject* get_geqrf_out_namedtuple();
|
| 32 |
+
PyTypeObject* get_geqrf_namedtuple();
|
| 33 |
+
PyTypeObject* get_histogram_out_namedtuple();
|
| 34 |
+
PyTypeObject* get_histogram_namedtuple();
|
| 35 |
+
PyTypeObject* get_histogramdd_namedtuple();
|
| 36 |
+
PyTypeObject* get_kthvalue_namedtuple();
|
| 37 |
+
PyTypeObject* get_kthvalue_out_namedtuple();
|
| 38 |
+
PyTypeObject* get_linalg_cholesky_ex_namedtuple();
|
| 39 |
+
PyTypeObject* get_linalg_cholesky_ex_out_namedtuple();
|
| 40 |
+
PyTypeObject* get_linalg_eig_namedtuple();
|
| 41 |
+
PyTypeObject* get_linalg_eig_out_namedtuple();
|
| 42 |
+
PyTypeObject* get_linalg_eigh_namedtuple();
|
| 43 |
+
PyTypeObject* get_linalg_eigh_out_namedtuple();
|
| 44 |
+
PyTypeObject* get_linalg_inv_ex_namedtuple();
|
| 45 |
+
PyTypeObject* get_linalg_inv_ex_out_namedtuple();
|
| 46 |
+
PyTypeObject* get_linalg_ldl_factor_namedtuple();
|
| 47 |
+
PyTypeObject* get_linalg_ldl_factor_out_namedtuple();
|
| 48 |
+
PyTypeObject* get_linalg_ldl_factor_ex_namedtuple();
|
| 49 |
+
PyTypeObject* get_linalg_ldl_factor_ex_out_namedtuple();
|
| 50 |
+
PyTypeObject* get_linalg_lstsq_namedtuple();
|
| 51 |
+
PyTypeObject* get_linalg_lstsq_out_namedtuple();
|
| 52 |
+
PyTypeObject* get_linalg_lu_namedtuple();
|
| 53 |
+
PyTypeObject* get_linalg_lu_out_namedtuple();
|
| 54 |
+
PyTypeObject* get_linalg_lu_factor_namedtuple();
|
| 55 |
+
PyTypeObject* get_linalg_lu_factor_out_namedtuple();
|
| 56 |
+
PyTypeObject* get_linalg_lu_factor_ex_namedtuple();
|
| 57 |
+
PyTypeObject* get_linalg_lu_factor_ex_out_namedtuple();
|
| 58 |
+
PyTypeObject* get_linalg_qr_namedtuple();
|
| 59 |
+
PyTypeObject* get_linalg_qr_out_namedtuple();
|
| 60 |
+
PyTypeObject* get_linalg_slogdet_namedtuple();
|
| 61 |
+
PyTypeObject* get_linalg_slogdet_out_namedtuple();
|
| 62 |
+
PyTypeObject* get_linalg_solve_ex_namedtuple();
|
| 63 |
+
PyTypeObject* get_linalg_solve_ex_out_namedtuple();
|
| 64 |
+
PyTypeObject* get_linalg_svd_namedtuple();
|
| 65 |
+
PyTypeObject* get_linalg_svd_out_namedtuple();
|
| 66 |
+
PyTypeObject* get_lu_unpack_namedtuple();
|
| 67 |
+
PyTypeObject* get_lu_unpack_out_namedtuple();
|
| 68 |
+
PyTypeObject* get_max_namedtuple();
|
| 69 |
+
PyTypeObject* get_max_out_namedtuple();
|
| 70 |
+
PyTypeObject* get_median_namedtuple();
|
| 71 |
+
PyTypeObject* get_median_out_namedtuple();
|
| 72 |
+
PyTypeObject* get_min_namedtuple();
|
| 73 |
+
PyTypeObject* get_min_out_namedtuple();
|
| 74 |
+
PyTypeObject* get_mode_namedtuple();
|
| 75 |
+
PyTypeObject* get_mode_out_namedtuple();
|
| 76 |
+
PyTypeObject* get_nanmedian_namedtuple();
|
| 77 |
+
PyTypeObject* get_nanmedian_out_namedtuple();
|
| 78 |
+
PyTypeObject* get_qr_out_namedtuple();
|
| 79 |
+
PyTypeObject* get_qr_namedtuple();
|
| 80 |
+
PyTypeObject* get_slogdet_namedtuple();
|
| 81 |
+
PyTypeObject* get_slogdet_out_namedtuple();
|
| 82 |
+
PyTypeObject* get_sort_out_namedtuple();
|
| 83 |
+
PyTypeObject* get_sort_namedtuple();
|
| 84 |
+
PyTypeObject* get_svd_out_namedtuple();
|
| 85 |
+
PyTypeObject* get_svd_namedtuple();
|
| 86 |
+
PyTypeObject* get_topk_out_namedtuple();
|
| 87 |
+
PyTypeObject* get_topk_namedtuple();
|
| 88 |
+
PyTypeObject* get_triangular_solve_out_namedtuple();
|
| 89 |
+
PyTypeObject* get_triangular_solve_namedtuple();
|
| 90 |
+
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
void initReturnTypes(PyObject* module);
|
| 94 |
+
|
| 95 |
+
} // namespace autograd
|
| 96 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <sstream>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace autograd {
|
| 7 |
+
namespace utils {
|
| 8 |
+
|
| 9 |
+
inline std::string requires_grad_leaf_error(bool requires_grad) {
|
| 10 |
+
std::ostringstream oss;
|
| 11 |
+
oss << "you can only change requires_grad flags of leaf variables.";
|
| 12 |
+
if (requires_grad == false) {
|
| 13 |
+
oss << " If you want to use a computed variable in a subgraph "
|
| 14 |
+
"that doesn't require differentiation use "
|
| 15 |
+
"var_no_grad = var.detach().";
|
| 16 |
+
}
|
| 17 |
+
return oss.str();
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
} // namespace utils
|
| 21 |
+
} // namespace autograd
|
| 22 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace autograd {
|
| 7 |
+
namespace utils {
|
| 8 |
+
|
| 9 |
+
// Helper functions to enforce the "Gradient Layout Contract" described in
|
| 10 |
+
// torch/csrc/autograd/functions/accumulate_grad.h.
|
| 11 |
+
|
| 12 |
+
// Checks if grad obeys the contract with variable.
|
| 13 |
+
inline bool obeys_layout_contract(
|
| 14 |
+
const at::Tensor& grad,
|
| 15 |
+
const at::Tensor& variable) {
|
| 16 |
+
TORCH_INTERNAL_ASSERT(!grad.is_sparse());
|
| 17 |
+
TORCH_INTERNAL_ASSERT(!grad.is_sparse_csr());
|
| 18 |
+
TORCH_INTERNAL_ASSERT(!variable.is_sparse_csr());
|
| 19 |
+
|
| 20 |
+
if (variable.is_nested()) {
|
| 21 |
+
// TODO: Nested Tensor does not have an implementation of detach. The
|
| 22 |
+
// current implementation of nested tensor likely does obey the gradient
|
| 23 |
+
// contract and should return true, but this would likely change in the
|
| 24 |
+
// future
|
| 25 |
+
return false;
|
| 26 |
+
} else if (variable.is_sparse()) {
|
| 27 |
+
// Gradient Layout Contract is not applicable for sparse layouts
|
| 28 |
+
return false;
|
| 29 |
+
} else if (variable.is_non_overlapping_and_dense()) {
|
| 30 |
+
// Only look at stride for dimensions that are not of size 1.
|
| 31 |
+
const auto& grad_sizes = grad.sym_sizes();
|
| 32 |
+
const auto& grad_strides = grad.sym_strides();
|
| 33 |
+
const auto& variable_strides = variable.sym_strides();
|
| 34 |
+
for (const auto idx : c10::irange(grad_sizes.size())) {
|
| 35 |
+
if (grad_sizes[idx] != 1) {
|
| 36 |
+
if (grad_strides[idx] != variable_strides[idx]) {
|
| 37 |
+
return false;
|
| 38 |
+
}
|
| 39 |
+
} else {
|
| 40 |
+
// This should not be needed but we don't check if a Tensor has views
|
| 41 |
+
// before stashing it. And 0-strided Tensors of size 1 are actually
|
| 42 |
+
// views for ops like cat.
|
| 43 |
+
// TODO: Actually detect views in the accumulateGrad function so that
|
| 44 |
+
// this Tensor is not considered at all.
|
| 45 |
+
if (grad_strides[idx] == 0) {
|
| 46 |
+
return false;
|
| 47 |
+
}
|
| 48 |
+
}
|
| 49 |
+
}
|
| 50 |
+
return true;
|
| 51 |
+
} else {
|
| 52 |
+
return grad.is_contiguous(at::MemoryFormat::Contiguous);
|
| 53 |
+
}
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
// Creates a clone of new_grad that obeys the contract with variable.
|
| 57 |
+
// The clone should attach to new_grad's history if GradMode::is_enabled().
|
| 58 |
+
inline at::Tensor clone_obey_contract(
|
| 59 |
+
const at::Tensor& new_grad,
|
| 60 |
+
const at::Tensor& variable) {
|
| 61 |
+
if (variable.is_non_overlapping_and_dense()) {
|
| 62 |
+
// (1)
|
| 63 |
+
// Does this dicey-looking sequence attach the result to new_grad's
|
| 64 |
+
// history if GradMode::is_enabled()? Yes, and @alband says it should.
|
| 65 |
+
return std::move(new_grad
|
| 66 |
+
.new_empty_strided_symint(
|
| 67 |
+
variable.sym_sizes(),
|
| 68 |
+
variable.sym_strides(),
|
| 69 |
+
variable.options().memory_format(c10::nullopt))
|
| 70 |
+
.copy_(new_grad));
|
| 71 |
+
} else {
|
| 72 |
+
// (2)
|
| 73 |
+
return new_grad.clone(at::MemoryFormat::Contiguous);
|
| 74 |
+
}
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
} // namespace utils
|
| 78 |
+
} // namespace autograd
|
| 79 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/autograd/function_hook.h>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace autograd {
|
| 7 |
+
namespace utils {
|
| 8 |
+
|
| 9 |
+
// Turns lambda into a torch::autograd::FunctionPostHook.
|
| 10 |
+
class LambdaPostHook : public torch::autograd::FunctionPostHook {
|
| 11 |
+
using variable_list = std::vector<torch::autograd::Variable>;
|
| 12 |
+
|
| 13 |
+
public:
|
| 14 |
+
// The lambda function takes as arguments the outputs and inputs of the
|
| 15 |
+
// autograd function and can modify the outputs of the autograd function by
|
| 16 |
+
// returning a new output if needed.
|
| 17 |
+
/* implicit */ LambdaPostHook(
|
| 18 |
+
std::function<variable_list(const variable_list&, const variable_list&)>
|
| 19 |
+
fn)
|
| 20 |
+
: fn_(std::move(fn)) {}
|
| 21 |
+
|
| 22 |
+
variable_list operator()(
|
| 23 |
+
const variable_list& outputs,
|
| 24 |
+
const variable_list& inputs) override {
|
| 25 |
+
return fn_(outputs, inputs);
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
protected:
|
| 29 |
+
std::function<variable_list(const variable_list&, const variable_list&)> fn_;
|
| 30 |
+
};
|
| 31 |
+
|
| 32 |
+
} // namespace utils
|
| 33 |
+
} // namespace autograd
|
| 34 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <torch/csrc/python_headers.h>
|
| 5 |
+
|
| 6 |
+
#include <torch/csrc/utils/python_arg_parser.h>
|
| 7 |
+
|
| 8 |
+
namespace torch {
|
| 9 |
+
namespace autograd {
|
| 10 |
+
namespace utils {
|
| 11 |
+
|
| 12 |
+
// The parameter allow_copy is to accept copy for Tensor.to (and by proxy
|
| 13 |
+
// PackedSequences.to) but not nn.Module.to.
|
| 14 |
+
inline std::tuple<
|
| 15 |
+
c10::optional<at::Device>,
|
| 16 |
+
c10::optional<at::ScalarType>,
|
| 17 |
+
bool,
|
| 18 |
+
bool,
|
| 19 |
+
c10::optional<at::MemoryFormat>>
|
| 20 |
+
parse_to_conversion(PythonArgs& r, bool allow_copy) {
|
| 21 |
+
if (r.idx == 0) {
|
| 22 |
+
if (!allow_copy && !r.isNone(3))
|
| 23 |
+
throw std::runtime_error(".to() does not accept copy argument");
|
| 24 |
+
return std::make_tuple(
|
| 25 |
+
r.deviceOptional(0),
|
| 26 |
+
r.scalartypeOptional(1),
|
| 27 |
+
r.toBool(2),
|
| 28 |
+
r.toBool(3),
|
| 29 |
+
r.memoryformatOptional(4));
|
| 30 |
+
} else if (r.idx == 1) {
|
| 31 |
+
if (!allow_copy && !r.isNone(2))
|
| 32 |
+
throw std::runtime_error(".to() does not accept copy argument");
|
| 33 |
+
return std::make_tuple(
|
| 34 |
+
c10::nullopt,
|
| 35 |
+
r.scalartype(0),
|
| 36 |
+
r.toBool(1),
|
| 37 |
+
r.toBool(2),
|
| 38 |
+
r.memoryformatOptional(3));
|
| 39 |
+
} else {
|
| 40 |
+
auto tensor = r.tensor(0);
|
| 41 |
+
if (!allow_copy && !r.isNone(2))
|
| 42 |
+
throw std::runtime_error(".to() does not accept copy argument");
|
| 43 |
+
return std::make_tuple(
|
| 44 |
+
tensor.device(),
|
| 45 |
+
tensor.scalar_type(),
|
| 46 |
+
r.toBool(1),
|
| 47 |
+
r.toBool(2),
|
| 48 |
+
r.memoryformatOptional(3));
|
| 49 |
+
}
|
| 50 |
+
}
|
| 51 |
+
} // namespace utils
|
| 52 |
+
} // namespace autograd
|
| 53 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/warnings.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/util/Exception.h>
|
| 3 |
+
|
| 4 |
+
#include <mutex>
|
| 5 |
+
#include <vector>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace autograd {
|
| 9 |
+
namespace utils {
|
| 10 |
+
|
| 11 |
+
// Warning handler for multi-threaded contexts. Gather warnings from
|
| 12 |
+
// all threads into a single queue, then process together at the end
|
| 13 |
+
// in the main thread.
|
| 14 |
+
class DelayWarningHandler : public at::WarningHandler {
|
| 15 |
+
public:
|
| 16 |
+
~DelayWarningHandler() override = default;
|
| 17 |
+
void replay_warnings();
|
| 18 |
+
|
| 19 |
+
private:
|
| 20 |
+
void process(const c10::Warning& warning) override;
|
| 21 |
+
|
| 22 |
+
std::vector<c10::Warning> warnings_;
|
| 23 |
+
std::mutex mutex_;
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} // namespace utils
|
| 27 |
+
} // namespace autograd
|
| 28 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <mutex>
|
| 4 |
+
#include <unordered_map>
|
| 5 |
+
|
| 6 |
+
#include <torch/csrc/distributed/autograd/context/context.h>
|
| 7 |
+
|
| 8 |
+
namespace torch {
|
| 9 |
+
namespace distributed {
|
| 10 |
+
namespace autograd {
|
| 11 |
+
|
| 12 |
+
// Singleton class per worker which is responsible for storing the distributed
|
| 13 |
+
// autograd context for each autograd pass and also cleans up data for an
|
| 14 |
+
// autograd pass once its done.
|
| 15 |
+
//
|
| 16 |
+
// Each autograd pass is assigned a unique autograd_context_id and all data for
|
| 17 |
+
// that pass (DistAutogradContext) is stored in this container indexed by the
|
| 18 |
+
// autograd_context_id. The autograd_context_id itself is a 64 bit globally
|
| 19 |
+
// unique id. The first 16 bits is the worker_id and the next 48 bits is an
|
| 20 |
+
// auto-incrementing id for each worker.
|
| 21 |
+
//
|
| 22 |
+
// This container is also responsible for maintaining a globally unique message
|
| 23 |
+
// id, which is used to associate send/recv autograd function pairs. The format
|
| 24 |
+
// is similar to the autograd_context_id where we have a 64 bit integer with
|
| 25 |
+
// first 16 bits being the worker id and next 48 bits are auto-incrementing.
|
| 26 |
+
class TORCH_API DistAutogradContainer {
|
| 27 |
+
public:
|
| 28 |
+
explicit DistAutogradContainer(uint32_t num_shards);
|
| 29 |
+
|
| 30 |
+
// One time initialization of the container.
|
| 31 |
+
static DistAutogradContainer& init(int64_t worker_id);
|
| 32 |
+
|
| 33 |
+
// Retrieve the singleton instance of the container, ensures we have
|
| 34 |
+
// initialized the container.
|
| 35 |
+
static DistAutogradContainer& getInstance();
|
| 36 |
+
|
| 37 |
+
// Create a new context for a distributed autograd pass.
|
| 38 |
+
const ContextPtr newContext();
|
| 39 |
+
|
| 40 |
+
// Clean up resources for a given context_id once the autograd pass is done.
|
| 41 |
+
// Sends RPC to other workers this worker knows about, telling them to clean
|
| 42 |
+
// up their context as well. Throws an exception if the context_id does not
|
| 43 |
+
// exist.
|
| 44 |
+
void releaseContext(int64_t context_id);
|
| 45 |
+
|
| 46 |
+
// Releases an autograd context if it is present on this node. Also sends RPC
|
| 47 |
+
// to other workers this worker knows about, telling them to clean up their
|
| 48 |
+
// context. Does nothing if it is not present.
|
| 49 |
+
void releaseContextIfPresent(int64_t context_id);
|
| 50 |
+
|
| 51 |
+
// Checks if the passed in context_id is valid.
|
| 52 |
+
void isValidContext(int64_t context_id);
|
| 53 |
+
|
| 54 |
+
// Retrieve the autograd context for a given context_id.
|
| 55 |
+
ContextPtr retrieveContext(int64_t context_id);
|
| 56 |
+
|
| 57 |
+
// Retrieves the currently active autograd context for the current thread.
|
| 58 |
+
ContextPtr currentContext();
|
| 59 |
+
|
| 60 |
+
// Checks whether or not the current thread has a valid autograd context.
|
| 61 |
+
bool hasValidContext() const;
|
| 62 |
+
|
| 63 |
+
// Generate a new autograd_message_id for send/recv autograd functions.
|
| 64 |
+
int64_t newAutogradMessageId();
|
| 65 |
+
|
| 66 |
+
// Creates a new autograd context with the provided context_id. If a context
|
| 67 |
+
// already exists with the provided context_id, we just return it.
|
| 68 |
+
// This does not set the current context for the current thread.
|
| 69 |
+
ContextPtr getOrCreateContext(int64_t context_id);
|
| 70 |
+
|
| 71 |
+
// Retrieves the maximum possible autograd_context_id/autograd_message_id that
|
| 72 |
+
// can be generated by this worker.
|
| 73 |
+
int64_t getMaxId();
|
| 74 |
+
|
| 75 |
+
// Retrieves the worker ID for this node
|
| 76 |
+
rpc::worker_id_t getWorkerId() const;
|
| 77 |
+
|
| 78 |
+
// Can set current context id if there is no valid context yet
|
| 79 |
+
static void setCurrentContextId(int64_t contextId);
|
| 80 |
+
|
| 81 |
+
// Forcibly sets the thread local current context id. Should only be used in
|
| 82 |
+
// cases where you know what you're doing and need to override the thread
|
| 83 |
+
// local. Otherwise, use setCurrentContextId instead.
|
| 84 |
+
static void forceCurrentContextId(int64_t contextId);
|
| 85 |
+
|
| 86 |
+
// Clear current context id
|
| 87 |
+
void clearCurrentContext();
|
| 88 |
+
|
| 89 |
+
// Returns the number of autograd contexts in the container.
|
| 90 |
+
size_t numAutogradContexts() const;
|
| 91 |
+
|
| 92 |
+
// Returns the current thread local context id for this thread.
|
| 93 |
+
static int64_t currentContextId();
|
| 94 |
+
|
| 95 |
+
DistAutogradContainer(const DistAutogradContainer&) = delete;
|
| 96 |
+
DistAutogradContainer& operator=(const DistAutogradContainer&) = delete;
|
| 97 |
+
DistAutogradContainer(DistAutogradContainer&&) = delete;
|
| 98 |
+
DistAutogradContainer& operator=(DistAutogradContainer&&) = delete;
|
| 99 |
+
|
| 100 |
+
private:
|
| 101 |
+
// Number of shards for the map storing autograd contexts. We'd like this
|
| 102 |
+
// to be a power of 2 and we don't expect a value much higher than the
|
| 103 |
+
// number of cores would provide much benefit.
|
| 104 |
+
static constexpr uint32_t kNumDefaultShards = 128;
|
| 105 |
+
|
| 106 |
+
// Use cache line size for alignment.
|
| 107 |
+
static constexpr int kCacheLineSize = 64;
|
| 108 |
+
|
| 109 |
+
// Structure holding one shard of the sharded autograd context map with its
|
| 110 |
+
// associated lock. Align to cache line size to avoid contention between
|
| 111 |
+
// adjacent entries.
|
| 112 |
+
struct alignas(kCacheLineSize) ContextsShard {
|
| 113 |
+
// Lock for this shard.
|
| 114 |
+
mutable std::mutex lock;
|
| 115 |
+
|
| 116 |
+
// Map storing autograd contexts for this shard.
|
| 117 |
+
std::unordered_map<int64_t, ContextPtr> contexts;
|
| 118 |
+
};
|
| 119 |
+
|
| 120 |
+
DistAutogradContainer() = delete;
|
| 121 |
+
~DistAutogradContainer() = default;
|
| 122 |
+
|
| 123 |
+
static DistAutogradContainer& getInstanceInternal();
|
| 124 |
+
|
| 125 |
+
// Retrieve the shard for given context_id.
|
| 126 |
+
ContextsShard& getShard(int64_t context_id);
|
| 127 |
+
|
| 128 |
+
// Sends an RPC to the workers that have a context corresponding to passed in
|
| 129 |
+
// context_id. This function should be called with the lock.
|
| 130 |
+
void sendReleaseContextRpc(
|
| 131 |
+
const std::unordered_set<rpc::worker_id_t>& workerIds,
|
| 132 |
+
int64_t context_id);
|
| 133 |
+
|
| 134 |
+
// Erase context_id from the autograd context map, and reset the thread local
|
| 135 |
+
// current context id if it corresponds to the passed in context id. This
|
| 136 |
+
// function should be called with the lock.
|
| 137 |
+
void eraseContextIdAndReset(ContextsShard& shard, int64_t context_id);
|
| 138 |
+
|
| 139 |
+
// Compute the number of shards for the autograd_contexts_ map.
|
| 140 |
+
static uint32_t computeNumShards();
|
| 141 |
+
|
| 142 |
+
// Auto incrementing context id used to identify unique autograd passes.
|
| 143 |
+
// Initialized with the first 16 bits being the worker_id.
|
| 144 |
+
std::atomic<int64_t> next_context_id_;
|
| 145 |
+
|
| 146 |
+
// Unique id to identify a worker in the distributed setting.
|
| 147 |
+
int16_t worker_id_;
|
| 148 |
+
|
| 149 |
+
// Whether or not the container has been initialized appropriately.
|
| 150 |
+
bool initialized_;
|
| 151 |
+
|
| 152 |
+
// Sharded autograd context map.
|
| 153 |
+
std::vector<ContextsShard> autograd_contexts_;
|
| 154 |
+
|
| 155 |
+
// Number of shards for the sharded autograd_contexts_ map.
|
| 156 |
+
uint32_t num_shards_;
|
| 157 |
+
|
| 158 |
+
// Autograd message id to identify unique send/recv autograd function pairs.
|
| 159 |
+
std::atomic<int64_t> next_autograd_message_id_;
|
| 160 |
+
|
| 161 |
+
// Maximum allowed value for autograd_context_id or autograd_message_id.
|
| 162 |
+
int64_t max_id_;
|
| 163 |
+
};
|
| 164 |
+
|
| 165 |
+
} // namespace autograd
|
| 166 |
+
} // namespace distributed
|
| 167 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/autograd/function.h>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace distributed {
|
| 7 |
+
namespace autograd {
|
| 8 |
+
|
| 9 |
+
// As part of our distributed autograd implementation, whenever we send an RPC
|
| 10 |
+
// from one node to another, we add a 'SendRpcBackward' autograd function to the
|
| 11 |
+
// autograd graph. This is more or less a placeholder function that is used to
|
| 12 |
+
// kickoff the autograd engine on the current worker on the backward pass. The
|
| 13 |
+
// edges for this autograd function are the inputs to the RPC method.
|
| 14 |
+
//
|
| 15 |
+
// During the backward pass, this function is queued for execution in the
|
| 16 |
+
// autograd engine which eventually runs the rest of the autograd graph.
|
| 17 |
+
struct TORCH_API SendRpcBackward : public torch::autograd::Node {
|
| 18 |
+
public:
|
| 19 |
+
torch::autograd::variable_list apply(
|
| 20 |
+
torch::autograd::variable_list&& inputs) override;
|
| 21 |
+
|
| 22 |
+
// SendRpcBackward is actually the root of an autograd graph on the local
|
| 23 |
+
// node. As a result, it doesn't receive any 'inputs', but rather the RPC
|
| 24 |
+
// framework passes gradients over to this function to kickoff local autograd
|
| 25 |
+
// computation.
|
| 26 |
+
void setGrads(const torch::autograd::variable_list& grads);
|
| 27 |
+
|
| 28 |
+
// Retrieve the grads for the function.
|
| 29 |
+
const torch::autograd::variable_list& getGrads() const;
|
| 30 |
+
|
| 31 |
+
private:
|
| 32 |
+
torch::autograd::variable_list grads_;
|
| 33 |
+
};
|
| 34 |
+
|
| 35 |
+
} // namespace autograd
|
| 36 |
+
} // namespace distributed
|
| 37 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpython_defs.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/utils/python_compat.h>
|
| 4 |
+
|
| 5 |
+
// Functions that need to be copied from the CPython source
|
| 6 |
+
// should go in cpython_defs.c. Copying is required when, e.g.,
|
| 7 |
+
// we need to call internal CPython functions that are not exposed.
|
| 8 |
+
|
| 9 |
+
#if IS_PYTHON_3_11_PLUS
|
| 10 |
+
|
| 11 |
+
#include <internal/pycore_frame.h>
|
| 12 |
+
|
| 13 |
+
int THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame* frame);
|
| 14 |
+
|
| 15 |
+
PyFunctionObject* _PyFunction_CopyWithNewCode(
|
| 16 |
+
PyFunctionObject* o,
|
| 17 |
+
PyCodeObject* code);
|
| 18 |
+
|
| 19 |
+
void THP_PyFrame_Clear(_PyInterpreterFrame* frame);
|
| 20 |
+
|
| 21 |
+
#endif
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/eval_frame.h
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <Python.h>
|
| 3 |
+
|
| 4 |
+
extern "C" {
|
| 5 |
+
PyObject* torch_c_dynamo_eval_frame_init(void);
|
| 6 |
+
extern bool is_dynamo_compiling;
|
| 7 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/guards.h
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <torch/csrc/python_headers.h>
|
| 3 |
+
|
| 4 |
+
PyObject* torch_c_dynamo_guards_init();
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/init.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// C2039 MSVC
|
| 4 |
+
#include <pybind11/complex.h>
|
| 5 |
+
#include <pybind11/pybind11.h>
|
| 6 |
+
#include <torch/csrc/utils/pybind.h>
|
| 7 |
+
|
| 8 |
+
#include <Python.h>
|
| 9 |
+
|
| 10 |
+
namespace torch {
|
| 11 |
+
namespace dynamo {
|
| 12 |
+
void initDynamoBindings(PyObject* torch);
|
| 13 |
+
}
|
| 14 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/python_compiled_autograd.h
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <torch/csrc/utils/python_stub.h>
|
| 3 |
+
|
| 4 |
+
// see [Note: Compiled Autograd]
|
| 5 |
+
namespace torch::dynamo::autograd {
|
| 6 |
+
PyObject* torch_c_dynamo_compiled_autograd_init();
|
| 7 |
+
} // namespace torch::dynamo::autograd
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/device_utils.h
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// WARNING: Be careful when adding new includes here. This header will be used
|
| 4 |
+
// in model.so, and should not refer to any aten/c10 headers except the stable
|
| 5 |
+
// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule
|
| 6 |
+
// applies to other files under torch/csrc/inductor/aoti_runtime/.
|
| 7 |
+
|
| 8 |
+
#ifdef USE_CUDA
|
| 9 |
+
|
| 10 |
+
// FIXME: Currently, CPU and CUDA backend are mutually exclusive.
|
| 11 |
+
// This is a temporary workaround. We need a better way to support
|
| 12 |
+
// multi devices.
|
| 13 |
+
|
| 14 |
+
#include <cuda.h>
|
| 15 |
+
#include <cuda_runtime_api.h>
|
| 16 |
+
|
| 17 |
+
#define AOTI_RUNTIME_DEVICE_CHECK(EXPR) \
|
| 18 |
+
do { \
|
| 19 |
+
const cudaError_t code = EXPR; \
|
| 20 |
+
const char* msg = cudaGetErrorString(code); \
|
| 21 |
+
if (code != cudaSuccess) { \
|
| 22 |
+
throw std::runtime_error( \
|
| 23 |
+
std::string("CUDA error: ") + std::string(msg)); \
|
| 24 |
+
} \
|
| 25 |
+
} while (0)
|
| 26 |
+
|
| 27 |
+
namespace torch {
|
| 28 |
+
namespace aot_inductor {
|
| 29 |
+
|
| 30 |
+
using DeviceStreamType = cudaStream_t;
|
| 31 |
+
|
| 32 |
+
} // namespace aot_inductor
|
| 33 |
+
} // namespace torch
|
| 34 |
+
|
| 35 |
+
#else // !USE_CUDA
|
| 36 |
+
|
| 37 |
+
#define AOTI_RUNTIME_DEVICE_CHECK(EXPR) \
|
| 38 |
+
bool ok = EXPR; \
|
| 39 |
+
if (!ok) { \
|
| 40 |
+
throw std::runtime_error("CPU runtime error"); \
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
namespace torch {
|
| 44 |
+
namespace aot_inductor {
|
| 45 |
+
|
| 46 |
+
using DeviceStreamType = void*;
|
| 47 |
+
|
| 48 |
+
} // namespace aot_inductor
|
| 49 |
+
} // namespace torch
|
| 50 |
+
|
| 51 |
+
#endif // USE_CUDA
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model.h
ADDED
|
@@ -0,0 +1,525 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <iostream>
|
| 4 |
+
#include <optional>
|
| 5 |
+
#include <sstream>
|
| 6 |
+
#include <stdexcept>
|
| 7 |
+
#include <string>
|
| 8 |
+
#include <unordered_map>
|
| 9 |
+
#include <vector>
|
| 10 |
+
|
| 11 |
+
// WARNING: Be careful when adding new includes here. This header will be used
|
| 12 |
+
// in model.so, and should not refer to any aten/c10 headers except the stable
|
| 13 |
+
// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule
|
| 14 |
+
// applies to other files under torch/csrc/inductor/aoti_runtime/.
|
| 15 |
+
#include <torch/csrc/inductor/aoti_runtime/device_utils.h>
|
| 16 |
+
#include <torch/csrc/inductor/aoti_torch/c/shim.h>
|
| 17 |
+
|
| 18 |
+
#define AOTI_RUNTIME_CHECK(EXPR, MSG) \
|
| 19 |
+
do { \
|
| 20 |
+
bool ok = EXPR; \
|
| 21 |
+
if (!ok) { \
|
| 22 |
+
throw std::runtime_error(MSG); \
|
| 23 |
+
} \
|
| 24 |
+
} while (0)
|
| 25 |
+
|
| 26 |
+
#if defined(__GNUC__) || defined(__clang__)
|
| 27 |
+
#define AOTI_NOINLINE __attribute__((noinline))
|
| 28 |
+
#elif _MSC_VER
|
| 29 |
+
#define AOTI_NOINLINE __declspec(noinline)
|
| 30 |
+
#else
|
| 31 |
+
#define AOTI_NOINLINE
|
| 32 |
+
#endif
|
| 33 |
+
|
| 34 |
+
// At codegen time, we write out a binary file called constants.bin.
|
| 35 |
+
// We then turn the raw binary to an object file that exposes this
|
| 36 |
+
// symbol and link it into the final .so.
|
| 37 |
+
// For information on the binary format, see `man objcopy`, under
|
| 38 |
+
// the "binary-architecture" flag:
|
| 39 |
+
// https://man7.org/linux/man-pages/man1/objcopy.1.html
|
| 40 |
+
// todo: use #embed in C++ 23 once available
|
| 41 |
+
extern const uint8_t _binary_constants_bin_start[];
|
| 42 |
+
extern const uint8_t _binary_constants_bin_end[];
|
| 43 |
+
|
| 44 |
+
#define AOTI_CONST_GPU_ALIGNMENT 64
|
| 45 |
+
|
| 46 |
+
namespace {
|
| 47 |
+
|
| 48 |
+
#ifdef USE_CUDA
|
| 49 |
+
|
| 50 |
+
using CUDAPtr = std::unique_ptr<void, std::function<void(void*)>>;
|
| 51 |
+
|
| 52 |
+
CUDAPtr RAII_cudaMalloc(size_t num_bytes) {
|
| 53 |
+
void* data_ptr;
|
| 54 |
+
AOTI_RUNTIME_DEVICE_CHECK(cudaMalloc((void**)&data_ptr, num_bytes));
|
| 55 |
+
auto deleter = [](void* ptr) { AOTI_RUNTIME_DEVICE_CHECK(cudaFree(ptr)); };
|
| 56 |
+
return CUDAPtr(data_ptr, deleter);
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
#endif // USE_CUDA
|
| 60 |
+
|
| 61 |
+
} // anonymous namespace
|
| 62 |
+
|
| 63 |
+
AOTI_NOINLINE static void throw_exception(
|
| 64 |
+
const char* call,
|
| 65 |
+
const char* file,
|
| 66 |
+
int64_t line) {
|
| 67 |
+
std::stringstream ss;
|
| 68 |
+
ss << call << " API call failed at " << file << ", line " << line;
|
| 69 |
+
throw std::runtime_error(ss.str());
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
// Evaluates an aoti_torch_* C-shim call and throws (via throw_exception,
// which reports the call text, file, and line) when it does not return
// AOTI_TORCH_SUCCESS.
// Wrapped in do/while(0) so the macro expands to a single statement and
// composes safely with if/else (the previous bare-if form had a
// dangling-else hazard). All call sites already terminate with ';'.
#define AOTI_TORCH_ERROR_CODE_CHECK(call)           \
  do {                                              \
    if ((call) != AOTI_TORCH_SUCCESS) {             \
      throw_exception(#call, __FILE__, __LINE__);   \
    }                                               \
  } while (0)
|
| 76 |
+
|
| 77 |
+
// Signature of the raw deleter stored in RAIIAtenTensorHandle below.
using DeleterFnPtr = void (*)(void*);

namespace torch {
namespace aot_inductor {

// Deleter adapter: frees a tensor created through the libtorch C ABI.
// Takes void* so it can be used as a type-erased unique_ptr deleter; the
// pointer is reinterpreted back to an AtenTensorHandle before deletion.
inline void delete_tensor_object(void* ptr) {
  AOTI_TORCH_ERROR_CODE_CHECK(
      aoti_torch_delete_tensor_object(reinterpret_cast<AtenTensorHandle>(ptr)));
}
|
| 86 |
+
|
| 87 |
+
// RAIIAtenTensorHandle steals the tensor objects created by the libtorch C ABI
// and releases them (via delete_tensor_object) when destroyed. Move-only:
// exactly one RAIIAtenTensorHandle owns a given tensor at a time.
class RAIIAtenTensorHandle {
 public:
  RAIIAtenTensorHandle() = delete;
  RAIIAtenTensorHandle(const RAIIAtenTensorHandle& other) = delete;
  RAIIAtenTensorHandle& operator=(const RAIIAtenTensorHandle& other) = delete;

  // Steal the ownership from another RAIIAtenTensorHandle using std::move
  RAIIAtenTensorHandle(RAIIAtenTensorHandle&& other) = default;
  RAIIAtenTensorHandle& operator=(RAIIAtenTensorHandle&& other) = default;

  // Steal the ownership from raw AtenTensorHandle
  RAIIAtenTensorHandle(AtenTensorHandle handle)
      : handle_(handle, delete_tensor_object) {}

  ~RAIIAtenTensorHandle() {
    handle_.reset();
  }

  // Return a raw AtenTensorHandle to be used by aoti_torch functions
  // Note: this function does NOT transfer the ownership of the handle
  operator AtenTensorHandle() const {
    return handle_.get();
  }

  // Gives up ownership and returns the raw handle; the caller becomes
  // responsible for deleting it.
  AtenTensorHandle release() {
    return handle_.release();
  }

  // Non-owning access to the raw handle.
  AtenTensorHandle get() {
    return handle_.get();
  }

  // Deletes the owned tensor (if any) immediately.
  void reset() {
    handle_.reset();
  }

  // Size of dimension `d`, queried through the C shim; throws on shim error.
  int64_t size(int64_t d) {
    int64_t size;
    AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_size(handle_.get(), d, &size));
    return size;
  }

  // Stride of dimension `d`, queried through the C shim; throws on shim error.
  int64_t stride(int64_t d) {
    int64_t stride;
    AOTI_TORCH_ERROR_CODE_CHECK(
        aoti_torch_get_stride(handle_.get(), d, &stride));
    return stride;
  }

  // Storage offset of the tensor, queried through the C shim.
  int64_t storage_offset() {
    int64_t storage_offset;
    AOTI_TORCH_ERROR_CODE_CHECK(
        aoti_torch_get_storage_offset(handle_.get(), &storage_offset));
    return storage_offset;
  }

 private:
  std::unique_ptr<AtenTensorOpaque, DeleterFnPtr> handle_;
};
|
| 147 |
+
|
| 148 |
+
using ConstantMap = std::unordered_map<std::string, RAIIAtenTensorHandle>;
|
| 149 |
+
|
| 150 |
+
// Steal the ownership from raw AtenTensorHandle to RAIIAtenTensorHandle
|
| 151 |
+
inline std::vector<RAIIAtenTensorHandle> steal_from_raw_handles_to_raii_handles(
|
| 152 |
+
AtenTensorHandle* handles,
|
| 153 |
+
size_t size) {
|
| 154 |
+
std::vector<RAIIAtenTensorHandle> result;
|
| 155 |
+
result.reserve(size);
|
| 156 |
+
for (size_t i = 0; i < size; i++) {
|
| 157 |
+
result.emplace_back(handles[i]);
|
| 158 |
+
handles[i] = nullptr;
|
| 159 |
+
}
|
| 160 |
+
return result;
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
// Defines the base class for AOTInductorModel, which is generated by the
|
| 164 |
+
// AOTInductor cpp codegen. Since we do not need dynamic dispatch, we rely
|
| 165 |
+
// on curiously recurring template pattern (CRTP) to save some runtime
|
| 166 |
+
// v-table overhead. The generated AOTInductorModel is specialized with
|
| 167 |
+
// methods such as run_impl.
|
| 168 |
+
template <typename Model>
|
| 169 |
+
class AOTInductorModelBase {
|
| 170 |
+
public:
|
| 171 |
+
AOTInductorModelBase(
|
| 172 |
+
size_t num_inputs,
|
| 173 |
+
size_t num_outputs,
|
| 174 |
+
size_t num_constants,
|
| 175 |
+
std::optional<std::string> cubin_dir)
|
| 176 |
+
: inputs_info_(num_inputs),
|
| 177 |
+
outputs_info_(num_outputs),
|
| 178 |
+
constants_info_(num_constants),
|
| 179 |
+
cubin_dir_(cubin_dir),
|
| 180 |
+
device_idx_(-1) {
|
| 181 |
+
#ifdef USE_CUDA
|
| 182 |
+
AOTI_RUNTIME_DEVICE_CHECK(cudaGetDevice(&device_idx_));
|
| 183 |
+
#endif // USE_CUDA
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
~AOTInductorModelBase() {
|
| 187 |
+
#ifdef USE_CUDA
|
| 188 |
+
if (run_finished_) {
|
| 189 |
+
auto code = cudaEventDestroy(*run_finished_);
|
| 190 |
+
if (code != cudaSuccess) {
|
| 191 |
+
std::cerr << "Failed to destroy CUDA event in AOTInductor model: "
|
| 192 |
+
<< cudaGetErrorString(code) << std::endl;
|
| 193 |
+
}
|
| 194 |
+
}
|
| 195 |
+
#endif // USE_CUDA
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
AOTInductorModelBase(AOTInductorModelBase&&) = delete;
|
| 199 |
+
AOTInductorModelBase& operator=(AOTInductorModelBase&&) = delete;
|
| 200 |
+
AOTInductorModelBase(const AOTInductorModelBase&) = delete;
|
| 201 |
+
AOTInductorModelBase& operator=(const AOTInductorModelBase&) = delete;
|
| 202 |
+
|
| 203 |
+
void run(
|
| 204 |
+
AtenTensorHandle*
|
| 205 |
+
input_handles, // array of input AtenTensorHandle; handles
|
| 206 |
+
// are stolen; the array itself is borrowed
|
| 207 |
+
AtenTensorHandle*
|
| 208 |
+
output_handles, // array for writing output AtenTensorHandle; handles
|
| 209 |
+
// will be stolen by the caller; the array itself is
|
| 210 |
+
// borrowed
|
| 211 |
+
DeviceStreamType stream,
|
| 212 |
+
AOTIProxyExecutorHandle proxy_executor) {
|
| 213 |
+
#ifdef USE_CUDA
|
| 214 |
+
if (!run_finished_) {
|
| 215 |
+
cudaEvent_t run_finished;
|
| 216 |
+
AOTI_RUNTIME_DEVICE_CHECK(cudaEventCreate(&run_finished));
|
| 217 |
+
run_finished_.emplace(run_finished);
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
auto* model = static_cast<Model*>(this);
|
| 221 |
+
model->run_impl(input_handles, output_handles, stream, proxy_executor);
|
| 222 |
+
AOTI_RUNTIME_DEVICE_CHECK(cudaEventRecord(*run_finished_, stream));
|
| 223 |
+
#else // !USE_CUDA
|
| 224 |
+
run_finished_ = false;
|
| 225 |
+
auto* model = static_cast<Model*>(this);
|
| 226 |
+
model->run_impl(input_handles, output_handles, stream, proxy_executor);
|
| 227 |
+
run_finished_ = true;
|
| 228 |
+
#endif // USE_CUDA
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
void load_constants(bool is_cpu) {
|
| 232 |
+
size_t num_constants = this->num_constants();
|
| 233 |
+
constants_map_->reserve(num_constants);
|
| 234 |
+
|
| 235 |
+
std::vector<size_t> constants_internal_offset(num_constants);
|
| 236 |
+
if (!is_cpu) {
|
| 237 |
+
make_cuda_constant_blob(constants_internal_offset);
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
size_t bytes_read = 0;
|
| 241 |
+
for (size_t i = 0; i < num_constants; i++) {
|
| 242 |
+
std::string name = this->constant_name(i);
|
| 243 |
+
size_t data_size = this->constant_data_size(i);
|
| 244 |
+
uint8_t* internal_ptr = (data_size != 0)
|
| 245 |
+
? constant_ptr(constants_internal_offset[i], bytes_read, data_size)
|
| 246 |
+
: nullptr;
|
| 247 |
+
bytes_read += data_size;
|
| 248 |
+
|
| 249 |
+
// Create at::Tensor from copied memory.
|
| 250 |
+
auto dtype = this->constant_type(i);
|
| 251 |
+
auto ndim = this->constant_ndim(i);
|
| 252 |
+
auto size = this->constant_shape(i);
|
| 253 |
+
auto stride = this->constant_stride(i);
|
| 254 |
+
auto offset = this->constant_offset(i);
|
| 255 |
+
|
| 256 |
+
auto device_type = aoti_torch_device_type_cuda();
|
| 257 |
+
if (is_cpu) {
|
| 258 |
+
device_type = aoti_torch_device_type_cpu();
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
AtenTensorHandle tensor_handle;
|
| 262 |
+
int device_idx = -1; // should be the same as was used for constant_blob_
|
| 263 |
+
#ifdef USE_CUDA
|
| 264 |
+
AOTI_RUNTIME_DEVICE_CHECK(cudaGetDevice(&device_idx));
|
| 265 |
+
#endif // USE_CUDA
|
| 266 |
+
AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_create_tensor_from_blob(
|
| 267 |
+
internal_ptr,
|
| 268 |
+
ndim,
|
| 269 |
+
size,
|
| 270 |
+
stride,
|
| 271 |
+
offset,
|
| 272 |
+
dtype,
|
| 273 |
+
device_type,
|
| 274 |
+
device_idx,
|
| 275 |
+
&tensor_handle));
|
| 276 |
+
constants_map_->emplace(std::move(name), tensor_handle);
|
| 277 |
+
}
|
| 278 |
+
this->update_constants_map(constants_map_);
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
#ifdef USE_CUDA
|
| 282 |
+
CUDAPtr&& release_constant_blob() {
|
| 283 |
+
return std::move(constant_blob_);
|
| 284 |
+
}
|
| 285 |
+
#endif
|
| 286 |
+
|
| 287 |
+
uint8_t* constant_ptr(
|
| 288 |
+
size_t constant_offset,
|
| 289 |
+
size_t bytes_read,
|
| 290 |
+
size_t data_size) {
|
| 291 |
+
#ifdef USE_CUDA
|
| 292 |
+
auto* constants_ptr = static_cast<uint8_t*>(constant_blob_.get());
|
| 293 |
+
uint8_t* internal_ptr = constants_ptr + constant_offset;
|
| 294 |
+
// Copy data to GPU memory
|
| 295 |
+
// TODO: Handle shared storage case.
|
| 296 |
+
AOTI_RUNTIME_DEVICE_CHECK(cudaMemcpy(
|
| 297 |
+
internal_ptr,
|
| 298 |
+
_binary_constants_bin_start + bytes_read,
|
| 299 |
+
data_size,
|
| 300 |
+
cudaMemcpyHostToDevice));
|
| 301 |
+
return internal_ptr;
|
| 302 |
+
#else // !USE_CUDA
|
| 303 |
+
// get pointer to constant which is packed in model during compile time.
|
| 304 |
+
return const_cast<uint8_t*>(_binary_constants_bin_start) + bytes_read;
|
| 305 |
+
#endif // USE_CUDA
|
| 306 |
+
}
|
| 307 |
+
|
| 308 |
+
void make_cuda_constant_blob(std::vector<size_t>& constants_internal_offset) {
|
| 309 |
+
#ifdef USE_CUDA
|
| 310 |
+
size_t num_constants = this->num_constants();
|
| 311 |
+
// Compute required blob size with 64-alignment if on GPU.
|
| 312 |
+
size_t max_blob = 0;
|
| 313 |
+
for (size_t i = 0; i < num_constants; i++) {
|
| 314 |
+
size_t data_size = this->constant_data_size(i);
|
| 315 |
+
if (data_size % AOTI_CONST_GPU_ALIGNMENT) {
|
| 316 |
+
data_size = AOTI_CONST_GPU_ALIGNMENT +
|
| 317 |
+
(data_size / AOTI_CONST_GPU_ALIGNMENT) * AOTI_CONST_GPU_ALIGNMENT;
|
| 318 |
+
}
|
| 319 |
+
constants_internal_offset[i] = max_blob;
|
| 320 |
+
max_blob += data_size;
|
| 321 |
+
}
|
| 322 |
+
constant_blob_ = RAII_cudaMalloc(max_blob);
|
| 323 |
+
#endif // USE_CUDA
|
| 324 |
+
}
|
| 325 |
+
|
| 326 |
+
size_t num_inputs() const {
|
| 327 |
+
return inputs_info_.size();
|
| 328 |
+
}
|
| 329 |
+
|
| 330 |
+
size_t num_outputs() const {
|
| 331 |
+
return outputs_info_.size();
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
size_t num_constants() const {
|
| 335 |
+
return constants_info_.size();
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
const char* input_name(int64_t idx) const {
|
| 339 |
+
return inputs_info_.at(idx).name;
|
| 340 |
+
}
|
| 341 |
+
|
| 342 |
+
const char* output_name(int64_t idx) const {
|
| 343 |
+
return outputs_info_.at(idx).name;
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
const char* constant_name(int64_t idx) const {
|
| 347 |
+
return constants_info_.at(idx).name;
|
| 348 |
+
}
|
| 349 |
+
|
| 350 |
+
size_t constant_ndim(int64_t idx) {
|
| 351 |
+
return constants_info_.at(idx).shape.size();
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
const int64_t* constant_shape(int64_t idx) const {
|
| 355 |
+
return constants_info_.at(idx).shape.data();
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
const int64_t* constant_stride(int64_t idx) const {
|
| 359 |
+
return constants_info_.at(idx).stride.data();
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
int32_t constant_type(int64_t idx) const {
|
| 363 |
+
return constants_info_.at(idx).dtype;
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
size_t constant_offset(int64_t idx) const {
|
| 367 |
+
return constants_info_.at(idx).offset;
|
| 368 |
+
}
|
| 369 |
+
|
| 370 |
+
size_t constant_data_size(int64_t idx) const {
|
| 371 |
+
return constants_info_.at(idx).data_size;
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
const char* get_in_spec() const {
|
| 375 |
+
return in_spec_.c_str();
|
| 376 |
+
}
|
| 377 |
+
|
| 378 |
+
const char* get_out_spec() const {
|
| 379 |
+
return out_spec_.c_str();
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
void update_constants_map(std::shared_ptr<ConstantMap> constants_map) {
|
| 383 |
+
constants_map_ = std::move(constants_map);
|
| 384 |
+
if (!constants_map_) {
|
| 385 |
+
return;
|
| 386 |
+
}
|
| 387 |
+
constants_.resize(constants_info_.size());
|
| 388 |
+
int idx = 0;
|
| 389 |
+
for (const auto& info : constants_info_) {
|
| 390 |
+
const auto it = constants_map_->find(info.name);
|
| 391 |
+
if (it != constants_map_->end()) {
|
| 392 |
+
constants_[idx] = it->second;
|
| 393 |
+
}
|
| 394 |
+
idx++;
|
| 395 |
+
}
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
+
/// Returns true if the model is complete.
|
| 399 |
+
bool is_finished() {
|
| 400 |
+
#ifdef USE_CUDA
|
| 401 |
+
if (!run_finished_) {
|
| 402 |
+
throw std::runtime_error{"Model CUDA event was not initialized"};
|
| 403 |
+
}
|
| 404 |
+
|
| 405 |
+
auto event_status = cudaEventQuery(*run_finished_);
|
| 406 |
+
if (event_status == cudaSuccess) {
|
| 407 |
+
return true;
|
| 408 |
+
} else if (event_status == cudaErrorNotReady) {
|
| 409 |
+
return false;
|
| 410 |
+
}
|
| 411 |
+
|
| 412 |
+
throw std::runtime_error(
|
| 413 |
+
std::string("The model did not finish successfully. Error: ") +
|
| 414 |
+
cudaGetErrorString(cudaGetLastError()));
|
| 415 |
+
#else // !USE_CUDA
|
| 416 |
+
return run_finished_;
|
| 417 |
+
#endif // USE_CUDA
|
| 418 |
+
}
|
| 419 |
+
|
| 420 |
+
/// Synchronizes completion event.
|
| 421 |
+
void wait_for_completion() {
|
| 422 |
+
#ifdef USE_CUDA
|
| 423 |
+
if (!run_finished_) {
|
| 424 |
+
throw std::runtime_error{"Model event was not initialized"};
|
| 425 |
+
}
|
| 426 |
+
|
| 427 |
+
AOTI_RUNTIME_DEVICE_CHECK(cudaEventSynchronize(*run_finished_));
|
| 428 |
+
#endif // USE_CUDA
|
| 429 |
+
}
|
| 430 |
+
|
| 431 |
+
protected:
|
| 432 |
+
struct ParamInfo {
|
| 433 |
+
const char* name = nullptr;
|
| 434 |
+
};
|
| 435 |
+
|
| 436 |
+
struct ConstInfo {
|
| 437 |
+
const char* name = nullptr;
|
| 438 |
+
std::vector<int64_t> shape;
|
| 439 |
+
std::vector<int64_t> stride;
|
| 440 |
+
int32_t dtype;
|
| 441 |
+
int64_t offset;
|
| 442 |
+
size_t data_size;
|
| 443 |
+
};
|
| 444 |
+
|
| 445 |
+
std::vector<ParamInfo> inputs_info_;
|
| 446 |
+
std::vector<ParamInfo> outputs_info_;
|
| 447 |
+
std::vector<ConstInfo> constants_info_;
|
| 448 |
+
std::string in_spec_;
|
| 449 |
+
std::string out_spec_;
|
| 450 |
+
|
| 451 |
+
std::shared_ptr<ConstantMap> constants_map_;
|
| 452 |
+
std::vector<AtenTensorHandle> constants_;
|
| 453 |
+
|
| 454 |
+
#ifdef USE_CUDA
|
| 455 |
+
// Holds the blob storage for constants' at::Tensor for CUDA.
|
| 456 |
+
CUDAPtr constant_blob_;
|
| 457 |
+
#endif // USE_CUDA
|
| 458 |
+
|
| 459 |
+
// A directory with CUDA binary files, e.g. compiled kernels, etc.
|
| 460 |
+
const std::optional<std::string> cubin_dir_;
|
| 461 |
+
|
| 462 |
+
// Record if the model finishes an inference run so that its owning
|
| 463 |
+
// AOTModelContainer can re-use this instance.
|
| 464 |
+
#ifdef USE_CUDA
|
| 465 |
+
std::optional<cudaEvent_t> run_finished_;
|
| 466 |
+
#else // !USE_CUDA
|
| 467 |
+
bool run_finished_;
|
| 468 |
+
#endif
|
| 469 |
+
|
| 470 |
+
// Generated model uses this device index to create CUDA guards.
|
| 471 |
+
int device_idx_;
|
| 472 |
+
};
|
| 473 |
+
|
| 474 |
+
// Codegen-ed classes can derive from this to keep pointers to loaded kernels.
// The virtual destructor allows derived kernel holders to be deleted through
// a base-class pointer (see AOTInductorModel::kernels_).
class AOTInductorModelKernelsBase {
 public:
  virtual ~AOTInductorModelKernelsBase() = default;
};
|
| 479 |
+
|
| 480 |
+
// Concrete model class; the bodies of its constructor and run_impl are
// emitted by the AOTInductor codegen (CRTP counterpart of
// AOTInductorModelBase<AOTInductorModel>).
class AOTInductorModel : public AOTInductorModelBase<AOTInductorModel> {
 public:
  AOTInductorModel(std::shared_ptr<ConstantMap>, std::optional<std::string>);

  // Implementation hook invoked by AOTInductorModelBase::run.
  void run_impl(
      AtenTensorHandle*
          input_handles, // array of input AtenTensorHandle; handles
                         // are stolen; the array itself is borrowed
      AtenTensorHandle*
          output_handles, // array for writing output AtenTensorHandle; handles
                          // will be stolen by the caller; the array itself is
                          // borrowed
      DeviceStreamType stream,
      AOTIProxyExecutorHandle proxy_executor);

  // Factory: builds a model that owns `constants` and looks up CUDA
  // binaries under `cubin_dir` (if provided).
  static std::unique_ptr<AOTInductorModel> Create(
      std::shared_ptr<ConstantMap> constants,
      std::optional<std::string> cubin_dir) {
    return std::make_unique<AOTInductorModel>(std::move(constants), cubin_dir);
  }

 private:
  // Loaded kernel pointers owned by the codegen-ed kernel holder.
  std::unique_ptr<AOTInductorModelKernelsBase> kernels_;
};
|
| 504 |
+
|
| 505 |
+
#ifdef USE_CUDA
// RAII wrapper around the C-shim CUDA stream guard: sets the current device
// and stream on construction and restores the previous ones when the guard
// (held in guard_) is destroyed.
class AOTICudaStreamGuard {
 public:
  AOTICudaStreamGuard(cudaStream_t stream, int32_t device_index) {
    CUDAStreamGuardHandle ptr;
    AOTI_TORCH_ERROR_CODE_CHECK(
        aoti_torch_create_cuda_stream_guard(stream, device_index, &ptr));
    // Type-erased owner: the deleter tears down the guard via the C shim.
    guard_ =
        std::unique_ptr<void, std::function<void(void*)>>(ptr, [](void* ptr) {
          AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_delete_cuda_stream_guard(
              reinterpret_cast<CUDAStreamGuardHandle>(ptr)));
        });
  }

 private:
  std::unique_ptr<void, std::function<void(void*)>> guard_;
};
#endif // USE_CUDA

} // namespace aot_inductor
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/tensor_converter.h
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <torch/csrc/inductor/aoti_torch/c/shim.h>

#include <ATen/Tensor.h>

namespace torch {
namespace aot_inductor {

// Functions declared here are not meant to be called from the AOTInductor
// generated model.so; they are host-side helpers for converting between
// at::Tensor and the C-ABI AtenTensorHandle.

// No ownership transfer, just pointer type conversion
TORCH_API at::Tensor* tensor_handle_to_tensor_pointer(AtenTensorHandle handle);

// No ownership transfer, just pointer type conversion
TORCH_API AtenTensorHandle tensor_pointer_to_tensor_handle(at::Tensor* tensor);

// unsafe_alloc_new_handles_from_tensors is used for allocating new aten
// tensor objects and return them as a vector of AtenTensorHandle (raw
// pointers), and those pointers will be stolen by model.so.
TORCH_API std::vector<AtenTensorHandle> unsafe_alloc_new_handles_from_tensors(
    std::vector<at::Tensor>& tensors);

// alloc_tensors_by_stealing_from_handles is used for creating a vector of aten
// tensors by stealing from an array of handles. Only the handles are stolen,
// and the array itself is borrowed.
//
// WARNING: Can NOT be called in model.so unless in the non-ABI-compatible mode
TORCH_API std::vector<at::Tensor> alloc_tensors_by_stealing_from_handles(
    AtenTensorHandle* handles,
    size_t length);

} // namespace aot_inductor
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <torch/csrc/lazy/backend/backend_device.h>
#include <torch/csrc/lazy/core/shape.h>
#include <cstring>

namespace torch {
namespace lazy {

/**
 * Represents (Tensor) data stored on a backend device
 * in its native format.
 * */
class TORCH_API BackendData {
 public:
  struct Info {
    /**
     * Used by Lazy Graph Executor to tag info on BackendData objs
     * */
    virtual ~Info() = default;
  };
  // Opaque backend-defined identifier for this data.
  using Handle = int64_t;

  BackendData(BackendDevice device, Shape shape)
      : device_(std::move(device)), shape_(std::move(shape)) {}

  virtual ~BackendData() = default;

  // Device this data lives on.
  const BackendDevice& device() const {
    return device_;
  }

  // Logical shape of the stored tensor data.
  const Shape& shape() const {
    return shape_;
  }

  Info* info() const {
    return info_.get();
  }

  // Installs a new Info tag and returns the previously installed one.
  std::shared_ptr<Info> SetInfo(std::shared_ptr<Info> info) {
    std::swap(info, info_);
    return info;
  }

  virtual Handle GetHandle() = 0;

  virtual void Assign(const BackendData& data) = 0;

  // Whether this object currently holds real device data (as opposed to
  // being a placeholder).
  virtual bool HasValue() const = 0;

 private:
  BackendDevice device_;
  Shape shape_;
  std::shared_ptr<Info> info_;
};

using BackendDataPtr = std::shared_ptr<BackendData>;

} // namespace lazy
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <memory>
#include <ostream>
#include <string>

#include <ATen/Tensor.h>
#include <c10/macros/Export.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>

namespace c10 {
struct Device;
}

namespace torch {
namespace lazy {

// Backend should extend it and define their own supported hardware types.
struct TORCH_API BackendDeviceType {
  int8_t type{(int8_t)at::kCPU};
  // Note: previous default value was '0', which actually maps to at::kCPU, at
  // least now it is explicit, we may want to make default/undefined semantics
  // more clear though
  BackendDeviceType() : type((int8_t)at::kCPU) {}
  // NOTE(review): intentionally implicit (allows int8_t -> BackendDeviceType
  // conversion at existing call sites); making it explicit would break
  // callers — confirm before tightening.
  BackendDeviceType(int8_t type) : type(type) {}

  virtual ~BackendDeviceType() = default;
  virtual std::string toString() const {
    return "Unknown";
  }
};

// Value type identifying a (type, ordinal) backend device; copyable, and
// totally ordered via compare() for use as a map/set key.
class TORCH_API BackendDevice {
 public:
  // The default constructor will set both the device type and ordinal
  // to backend specific defaults.
  BackendDevice();
  BackendDevice(std::shared_ptr<BackendDeviceType>&& type, int64_t ordinal);

  int8_t type() const;
  int64_t ordinal() const {
    return ordinal_;
  }

  bool operator==(const BackendDevice& other) const {
    return compare(other) == 0;
  }
  bool operator!=(const BackendDevice& other) const {
    return compare(other) != 0;
  }
  bool operator<(const BackendDevice& rhs) const {
    return compare(rhs) < 0;
  }

  std::string toString() const;

 private:
  // Three-way comparison helper backing all relational operators.
  int compare(const BackendDevice& rhs) const;

  // Use shared_ptr instead of unique_ptr so that BackendDevice can be copied.
  std::shared_ptr<BackendDeviceType> type_;
  int64_t ordinal_;
};

TORCH_API std::ostream& operator<<(
    std::ostream& os,
    const BackendDevice& device);

// Helpers for converting a c10::Device to BackendDevice and vice versa.
TORCH_API BackendDevice atenDeviceToBackendDevice(const c10::Device& device);
TORCH_API c10::Device backendDeviceToAtenDevice(const BackendDevice& device);

// Tries to extract the backend device out of the lazy tensor. Returns nullopt
// if the input is not a lazy tensor.
TORCH_API c10::optional<BackendDevice> GetBackendDevice(
    const at::ITensorListRef tensors);
TORCH_API c10::optional<BackendDevice> GetBackendDevice(
    const at::TensorList tensors);
TORCH_API c10::optional<BackendDevice> GetBackendDevice(
    const at::Tensor& tensor);
TORCH_API c10::optional<BackendDevice> GetBackendDevice(
    const c10::optional<c10::Device>& device);

// For variadic template. Base case: no arguments yields nullopt.
TORCH_API c10::optional<BackendDevice> GetBackendDevice();

// Variadic overload: returns the device of the first argument that yields
// one, recursing over the remaining arguments otherwise.
template <typename T, typename... Args>
c10::optional<BackendDevice> GetBackendDevice(
    const T& tensor,
    const Args&... forward_tensors) {
  auto optional_device = GetBackendDevice(tensor);
  if (optional_device) {
    return optional_device;
  }
  return GetBackendDevice(forward_tensors...);
}

} // namespace lazy
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
#include <torch/csrc/lazy/backend/backend_data.h>
|
| 5 |
+
#include <torch/csrc/lazy/backend/backend_device.h>
|
| 6 |
+
#include <torch/csrc/lazy/backend/lowering_context.h>
|
| 7 |
+
#include <torch/csrc/lazy/core/lazy_graph_executor.h>
|
| 8 |
+
#include <torch/csrc/lazy/core/shape.h>
|
| 9 |
+
#include <torch/csrc/lazy/core/tensor.h>
|
| 10 |
+
#include <atomic>
|
| 11 |
+
|
| 12 |
+
namespace torch {
|
| 13 |
+
namespace lazy {
|
| 14 |
+
|
| 15 |
+
struct IrBuilder;
|
| 16 |
+
|
| 17 |
+
/**
|
| 18 |
+
* Work in progress- don't treat this as a stable interface yet!
|
| 19 |
+
*/
|
| 20 |
+
class TORCH_API BackendImplInterface {
|
| 21 |
+
public:
|
| 22 |
+
virtual ~BackendImplInterface() = default;
|
| 23 |
+
|
| 24 |
+
/**
|
| 25 |
+
* Initialization/Teardown
|
| 26 |
+
* */
|
| 27 |
+
// No-op by default. Allows custom functionality to be exposed through
|
| 28 |
+
// extension bindings.
|
| 29 |
+
virtual void InitializeAtenBindings() const {}
|
| 30 |
+
|
| 31 |
+
virtual void PrepareToExit() const = 0;
|
| 32 |
+
|
| 33 |
+
/**
|
| 34 |
+
* Configuration
|
| 35 |
+
* */
|
| 36 |
+
|
| 37 |
+
virtual void SetRngSeed(size_t seed) const = 0;
|
| 38 |
+
|
| 39 |
+
/**
|
| 40 |
+
* IR Tracing
|
| 41 |
+
* */
|
| 42 |
+
|
| 43 |
+
virtual const IrBuilder* GetIrBuilder() const = 0;
|
| 44 |
+
|
| 45 |
+
/**
|
| 46 |
+
* Data Transfer
|
| 47 |
+
* */
|
| 48 |
+
|
| 49 |
+
virtual BackendDataPtr MakeComputationDataFromTensor(
|
| 50 |
+
const at::Tensor& tensor,
|
| 51 |
+
const Shape& shape,
|
| 52 |
+
const BackendDevice& device) const = 0;
|
| 53 |
+
virtual BackendDataPtr MakeComputationDataFromScalar(
|
| 54 |
+
const at::Scalar& scalar,
|
| 55 |
+
const torch::lazy::BackendDevice& device) const = 0;
|
| 56 |
+
virtual BackendDataPtr CreateDataPlaceholder(
|
| 57 |
+
const BackendDevice& device,
|
| 58 |
+
const Shape& shape) const = 0;
|
| 59 |
+
|
| 60 |
+
// Gets backend data if the node is a device data node. Otherwise returns
|
| 61 |
+
// nullptr
|
| 62 |
+
virtual BackendDataPtr GetComputationDataFromNode(const Node*) const = 0;
|
| 63 |
+
|
| 64 |
+
virtual at::Tensor MakeTensorFromComputationData(
|
| 65 |
+
const BackendDataPtr data,
|
| 66 |
+
c10::optional<at::ScalarType> logical_scalar_type) const = 0;
|
| 67 |
+
|
| 68 |
+
/**
|
| 69 |
+
* Lowering, Compilation, Execution
|
| 70 |
+
* */
|
| 71 |
+
|
| 72 |
+
virtual std::unique_ptr<LoweringContext> CreateLoweringContext(
|
| 73 |
+
const std::string& name,
|
| 74 |
+
BackendDevice device,
|
| 75 |
+
c10::ArrayRef<const torch::lazy::Node*> post_order,
|
| 76 |
+
Util::EmissionMap emit_status) const = 0;
|
| 77 |
+
|
| 78 |
+
virtual std::unique_ptr<LoweringContext> CreateLoweringContext(
|
| 79 |
+
const std::string& name,
|
| 80 |
+
BackendDevice device) const = 0;
|
| 81 |
+
|
| 82 |
+
// TODO(whc) need to keep this?
|
| 83 |
+
virtual std::vector<std::string> GetCompilationDevices(
|
| 84 |
+
const std::string& device,
|
| 85 |
+
c10::ArrayRef<std::string> devices) const = 0;
|
| 86 |
+
|
| 87 |
+
virtual std::vector<ComputationPtr> Compile(
|
| 88 |
+
std::vector<ComputationPtr> instances) const = 0;
|
| 89 |
+
|
| 90 |
+
virtual std::vector<BackendDataPtr> ExecuteComputation(
|
| 91 |
+
torch::lazy::ComputationPtr computation,
|
| 92 |
+
c10::ArrayRef<BackendDataPtr> arguments,
|
| 93 |
+
const BackendDevice& device) const = 0;
|
| 94 |
+
|
| 95 |
+
/**
|
| 96 |
+
* Device Configuration
|
| 97 |
+
* */
|
| 98 |
+
|
| 99 |
+
// Set or get the default device type.
|
| 100 |
+
// For backends used with virtual c10::Devices, this configures what real
|
| 101 |
+
// device type the backend should use, and matters if the backend supports
|
| 102 |
+
// more than one type of real device.
|
| 103 |
+
virtual std::shared_ptr<BackendDeviceType> GetDefaultDeviceType() const = 0;
|
| 104 |
+
virtual void SetDefaultDeviceType(int8_t type) = 0;
|
| 105 |
+
|
| 106 |
+
// Set or get the default device ordinal.
|
| 107 |
+
// For backends that supports multi-device, this configures what the
|
| 108 |
+
// default device the backend should use.
|
| 109 |
+
virtual int64_t GetDefaultDeviceOrdinal() const = 0;
|
| 110 |
+
virtual void SetDefaultDeviceOrdinal(int64_t) = 0;
|
| 111 |
+
|
| 112 |
+
// Specify which aten device should be used for eager fallback
|
| 113 |
+
// may change depending on current 'Default' DeviceType
|
| 114 |
+
virtual at::DeviceType EagerFallbackDeviceType() const = 0;
|
| 115 |
+
|
| 116 |
+
// Query all available backend devices
|
| 117 |
+
virtual std::vector<BackendDevice> GetBackendDevices() const = 0;
|
| 118 |
+
|
| 119 |
+
virtual std::string CreateMetricReport() const {
|
| 120 |
+
return "";
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
// Map a particular c10:: device to a concrete backend device
|
| 124 |
+
// Note:: c10:: devices may be virtual or concrete. xla:: and lazy:: are
|
| 125 |
+
// virtual devices, meaning they may map to a gpu, tpu, etc. behind the
|
| 126 |
+
// scenes. In the future, non-virtual c10:: devices may also use lazy tensors
|
| 127 |
+
// through a mode, in which case these APIs should still work, but should be
|
| 128 |
+
// identity mappings.
|
| 129 |
+
virtual BackendDevice GetBackendDevice(c10::Device device) const = 0;
|
| 130 |
+
|
| 131 |
+
// TODO(whc)
|
| 132 |
+
// Additional APIs expected for supporting distributed training, to be
|
| 133 |
+
// designed
|
| 134 |
+
|
| 135 |
+
/**
|
| 136 |
+
* Debug/Metrics
|
| 137 |
+
* */
|
| 138 |
+
|
| 139 |
+
// virtual std::map<std::string, Metric> GetMetrics() const = 0;
|
| 140 |
+
|
| 141 |
+
// virtual MemoryInfo GetMemoryInfo(const std::string& device) = 0;
|
| 142 |
+
|
| 143 |
+
virtual std::string GetComputationBackendText(
|
| 144 |
+
const ComputationPtr computation) const = 0;
|
| 145 |
+
};
|
| 146 |
+
|
| 147 |
+
// Registers a backend implementation with the lazy tensor core. Constructing
// an instance hands the given interface pointer to the registry.
// NOTE(review): registration presumably makes the interface retrievable via
// getBackend() below — confirm in the corresponding .cpp.
class TORCH_API BackendRegistrar {
 public:
  BackendRegistrar(const BackendImplInterface* backend_impl_interface);
};

// Query whether a backend implementation has been registered, and access it.
TORCH_API bool hasBackend();
TORCH_API const BackendImplInterface* getBackend();

// Access the IR builder associated with the registered backend.
TORCH_API const IrBuilder* getIrBuilder();
|
| 156 |
+
|
| 157 |
+
} // namespace lazy
|
| 158 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* Cache utils in this file is adapted from PyTorch/XLA
|
| 3 |
+
* https://github.com/pytorch/xla/blob/master/third_party/xla_client/cache.h
|
| 4 |
+
*/
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <functional>
|
| 9 |
+
#include <list>
|
| 10 |
+
#include <memory>
|
| 11 |
+
#include <mutex>
|
| 12 |
+
#include <unordered_map>
|
| 13 |
+
#include <utility>
|
| 14 |
+
|
| 15 |
+
namespace torch {
|
| 16 |
+
namespace lazy {
|
| 17 |
+
|
| 18 |
+
// Generic key and object cache with LRU expiration policy. The objects of type
// T will be stored as std::shared_ptr<T> and taken and returned as such, by the
// cache API. A max_size of zero disables caching entirely: Add() becomes a
// pass-through and lookups always miss.
template <
    typename K,
    typename T,
    typename H = std::hash<K>,
    typename E = std::equal_to<K>>
class Cache {
 public:
  using TypePtr = std::shared_ptr<T>;
  using Element = std::pair<K, TypePtr>;

  explicit Cache(size_t max_size) : max_size_(max_size) {}

  // Adds an object to the cache, unless it already exists. If the cache grows
  // beyond the limit set during construction, the oldest used object will be
  // removed from the cache. Returns the cached object for `key` (which is the
  // previously stored one if the key was already present).
  TypePtr Add(K key, TypePtr object) {
    if (!max_size_) {
      // Zero-capacity cache: nothing is stored, hand the object straight back.
      return object;
    }
    std::lock_guard<std::mutex> guard(lock_);
    element_list_.emplace_front(std::move(key), std::move(object));
    auto head = element_list_.begin();
    auto inserted = element_map_.emplace(&head->first, head);
    if (!inserted.second) {
      // Key already cached: drop the duplicate node we just created and
      // promote the existing entry to most-recently-used.
      element_list_.erase(head);
      DoLRU(inserted.first->second);
    } else if (element_list_.size() > max_size_) {
      // Over capacity: evict the least-recently-used entry (the list tail).
      Element* victim = &element_list_.back();
      element_map_.erase(&victim->first);
      element_list_.pop_back();
    }
    return inserted.first->second->second;
  }

  // Retrieves the existing object if it exists. If it does, its position in
  // the LRU list gets moved to the head of the list.
  // Returns nullptr if no object with the specified key is found within the
  // cache.
  TypePtr Get(const K& key) {
    if (!max_size_) {
      return nullptr;
    }
    std::lock_guard<std::mutex> guard(lock_);
    auto found = element_map_.find(&key);
    if (found == element_map_.end()) {
      return nullptr;
    }
    DoLRU(found->second);
    return found->second->second;
  }

  // Returns the most-recently-used entry; the cache must be non-empty.
  TypePtr GetLatest() {
    std::lock_guard<std::mutex> guard(lock_);
    TORCH_CHECK(!element_list_.empty());
    return element_list_.front().second;
  }

  // Removes the entry for `key`, returning whether anything was erased.
  bool Erase(const K& key) {
    if (!max_size_) {
      return false;
    }
    std::lock_guard<std::mutex> guard(lock_);
    auto found = element_map_.find(&key);
    if (found == element_map_.end()) {
      return false;
    }
    auto node = found->second;
    element_map_.erase(found);
    element_list_.erase(node);
    return true;
  }

  // Drops every cached entry.
  void Clear() {
    if (!max_size_) {
      return;
    }
    std::lock_guard<std::mutex> guard(lock_);
    element_map_.clear();
    element_list_.clear();
  }

  // Number of entries currently held.
  int Numel() const {
    if (!max_size_) {
      return 0;
    }
    std::lock_guard<std::mutex> guard(lock_);
    TORCH_CHECK(element_map_.size() == element_list_.size());
    return element_map_.size();
  }

 private:
  using ElementList = std::list<Element>;

  // The map keys on pointers into the list nodes (stable for std::list) while
  // hashing/comparing the pointed-to K values via these adaptors.
  struct Hasher {
    size_t operator()(const K* key) const {
      return hasher(*key);
    }

    H hasher;
  };

  struct Equaler {
    bool operator()(const K* k1, const K* k2) const {
      return equaler(*k1, *k2);
    }

    E equaler;
  };

  using ElementMap = std::
      unordered_map<const K*, typename ElementList::iterator, Hasher, Equaler>;

  // Moves the given entry to the front of the list (most-recently-used).
  // splice() re-links nodes, so map iterators/pointers stay valid.
  void DoLRU(typename ElementList::iterator it) {
    element_list_.splice(element_list_.begin(), element_list_, it);
  }

  mutable std::mutex lock_;
  const size_t max_size_ = 0;
  ElementList element_list_;
  ElementMap element_map_;
};
|
| 142 |
+
|
| 143 |
+
} // namespace lazy
|
| 144 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/config.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
#include <c10/macros/Export.h>
#include <c10/util/Flags.h>

// Declarations of configuration flags for the lazy tensor core.

// Boolean feature toggles.
C10_DECLARE_bool(torch_lazy_ir_debug);
C10_DECLARE_bool(torch_lazy_handle_special_scalars);
C10_DECLARE_bool(torch_lazy_all_numbers_special_scalars);
C10_DECLARE_bool(torch_lazy_param_aliasing);
C10_DECLARE_bool(torch_lazy_reuse_ir);
C10_DECLARE_bool(torch_lazy_use_thread_pool);
C10_DECLARE_bool(torch_lazy_enable_device_data_cache);

// Integer-valued tunables (cache sizes, pool sizes, sampling counts).
C10_DECLARE_int(torch_lazy_compilation_cache_size);
C10_DECLARE_int(torch_lazy_device_data_cache_size);
C10_DECLARE_int(torch_lazy_io_thread_pool_size);
C10_DECLARE_int(torch_lazy_metrics_samples);
C10_DECLARE_int(torch_lazy_trim_graph_check_frequency);
C10_DECLARE_int(torch_lazy_trim_graph_size);

C10_DECLARE_string(torch_lazy_metrics_percentiles);

C10_DECLARE_int(torch_lazy_shape_cache_size);

namespace torch {
namespace lazy {
// Mutable accessor for the "force fallback" setting.
// NOTE(review): presumably names an op (or set of ops) to force onto the
// eager fallback path — confirm against the defining .cpp.
TORCH_API std::string& getLTCForceFallback();
}
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/lazy/core/ir.h>
|
| 4 |
+
|
| 5 |
+
#include <string>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace lazy {
|
| 9 |
+
|
| 10 |
+
class BackendDevice;

// Static helpers that render collections of lazy IR nodes as text for
// debugging and graph dumping.
class TORCH_API DumpUtil {
 public:
  // Render the given nodes in Graphviz DOT format.
  static std::string ToDot(c10::ArrayRef<const Node*> nodes);

  // DOT rendering variant that takes an already-computed post-order together
  // with the graph roots.
  static std::string PostOrderToDot(
      c10::ArrayRef<const Node*> post_order,
      c10::ArrayRef<const Node*> roots);

  // Render the given nodes as plain-text IR.
  static std::string ToText(c10::ArrayRef<const Node*> nodes);

  // Text rendering variant that takes an already-computed post-order together
  // with the graph roots.
  static std::string PostOrderToText(
      c10::ArrayRef<const Node*> post_order,
      c10::ArrayRef<const Node*> roots);

  // Backend-specific textual dump for `values` on `device`.
  static std::string ToBackend(
      c10::ArrayRef<Value> values,
      const BackendDevice& device);
};
|
| 30 |
+
|
| 31 |
+
} // namespace lazy
|
| 32 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_metadata.h
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/Optional.h>
|
| 4 |
+
|
| 5 |
+
#include <string>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
namespace torch {
|
| 9 |
+
namespace lazy {
|
| 10 |
+
// A single frame of user source information that can be attached to IR nodes.
struct SourceLocation {
  std::string file;
  std::string function;
  int line = -1;  // -1 when the source line is unknown
};

// Emit an abbreviated rendering of the given frames to the stream.
TORCH_API void EmitShortFrameInfo(
    std::ostream& stream,
    const std::vector<SourceLocation>& frames);

// Full stream rendering of a frame stack.
TORCH_API std::ostream& operator<<(
    std::ostream& stream,
    const std::vector<SourceLocation>& frames);

// The base class for user defined metadata which is possible to attach to IR
// nodes.
struct TORCH_API UserMetaData {
  virtual ~UserMetaData() = default;
};

// Per-node metadata: the IR scope the node was created under and the source
// frames associated with it.
struct TORCH_API MetaData {
  std::string scope;
  std::vector<SourceLocation> frame_info;
};

// TODO(whc) is this going to be used outside of in IR decompositions?
// RAII data structure to be used as a stack variable to enter a new IR scope.
// IR scope names will appear in the IR and will help identifying the source of
// the single IR nodes.
struct TORCH_API ScopePusher {
  explicit ScopePusher(const std::string& name);
  ~ScopePusher();

  // Clears any scopes accumulated so far.
  static void ResetScopes();
};

// Returns the current metadata; populated only when IR debugging is enabled.
TORCH_API MetaData GetMetaDataIfDebugging();
|
| 47 |
+
|
| 48 |
+
} // namespace lazy
|
| 49 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/lazy_graph_executor.h
ADDED
|
@@ -0,0 +1,426 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/ArrayRef.h>
|
| 4 |
+
#include <torch/csrc/lazy/backend/lowering_context.h>
|
| 5 |
+
#include <torch/csrc/lazy/core/cache.h>
|
| 6 |
+
#include <torch/csrc/lazy/core/ir_util.h>
|
| 7 |
+
#include <torch/csrc/lazy/core/multi_wait.h>
|
| 8 |
+
#include <torch/csrc/lazy/core/tensor.h>
|
| 9 |
+
#include <torch/csrc/lazy/core/util.h>
|
| 10 |
+
|
| 11 |
+
namespace torch {
|
| 12 |
+
namespace lazy {
|
| 13 |
+
|
| 14 |
+
// Orchestrates tracing, compilation, caching and execution of lazy tensor
// graphs. Backends may subclass it and install their specialization via
// Register(); the active instance is retrieved with Get().
class TORCH_API LazyGraphExecutor {
 public:
  // BackendData annotation tying a device buffer back to the lazy tensor that
  // produced it.
  struct DeviceDataInfo : public BackendData::Info {
    DeviceDataInfo(int64_t tensor_id, bool read_only)
        : tensor_id(tensor_id), read_only(read_only) {}

    int64_t tensor_id = 0;
    bool read_only = false;
  };

  // Register a lazy graph executor instance that can be retrieved using Get()
  static void Register(LazyGraphExecutor*);
  static LazyGraphExecutor* Get();

  virtual ~LazyGraphExecutor() = default;

  // Override these methods to perform custom tensor registration and
  // unregistration. Note: It is vital that the parent implementations are also
  // called in order for the tensors to show up in the live tensor list.
  virtual void RegisterTensor(std::shared_ptr<LazyTensor::Data> data);
  virtual void UnregisterTensor(LazyTensor::Data* data);

  // Seed for random generator.
  // Override to supply your own DeviceContextArena.
  virtual Value GetRngSeed(const BackendDevice& device);
  virtual uint64_t GetRunningSeed(const BackendDevice& device);
  virtual void SetRngSeed(const BackendDevice& device, uint64_t seed);

  // Blocks until outstanding asynchronous work on `device` completes.
  void DeviceBarrier(const BackendDevice& device);

  // Obtain (possibly cached) backend device data for a CPU tensor.
  BackendDataPtr GetDeviceData(
      const at::Tensor& tensor,
      const BackendDevice& device);

  // Obtain (possibly cached) backend device data for a scalar of the given
  // type.
  BackendDataPtr GetDeviceData(
      const at::Scalar& value,
      at::ScalarType scalar_type,
      const BackendDevice& device);

  // Retrieves the set of lazy tensors which are currently live in the system,
  // for the given device. If device is nullptr, the live tensors for all
  // devices will be returned. Returned tensors are sorted by device as primary
  // key, and by unique ID as secondary key.
  std::vector<LazyTensorPtr> GetLiveTensors(const BackendDevice* device);

  // Makes sure that any outstanding IR operation accumulated over live tensors
  // gets turned into device data. If wait is true, the sync operation will be
  // run synchronously. The devices argument, if not empty, tells the devices
  // which should be participating in the replicated computation.
  virtual void SyncLiveTensorsGraph(
      const BackendDevice* device,
      c10::ArrayRef<std::string> devices,
      bool wait);

  // Applies all the pending IR operations queued over the input tensors. All
  // the tensors must be on the same device. If wait is true, the sync operation
  // will be run synchronously. The devices argument, if not empty, tells the
  // devices which should be participating in the replicated computation.
  void SyncTensorsGraph(
      std::vector<LazyTensorPtr>* tensors,
      c10::ArrayRef<std::string> devices,
      bool wait,
      bool sync_ltc_data);

  // Marks an execution step, which allows the tensor framework to understand
  // the computation boundaries.
  // Override to supply your own DeviceContextArena.
  virtual void MarkStep(const BackendDevice& device);

  // Waits for all the outstanding operations on all the supplied devices.
  // If devices is empty, the wait will happen for all local devices.
  void WaitDeviceOps(c10::ArrayRef<BackendDevice> devices);

  // Retrieves the PyTorch CPU tensors behind the lazy tensors IR operations.
  // All the tensors must be on the same device.
  std::vector<at::Tensor> GetTensors(std::vector<LazyTensorPtr>* tensors);

  // Advances the graph-trim counter (see ResetTrimCounter()).
  size_t IncTrimCounter() const;

  // Dumps the backend specific text of the computation accumulated in the
  // graph which is attached to the tensors.
  std::string DumpBackendComputation(const std::vector<LazyTensorPtr>& tensors);

  Value GetDeviceDataIrValue(
      const at::Scalar& value,
      c10::ScalarType type,
      const BackendDevice& device);
  Value GetIrValueForScalar(
      const at::Scalar& value,
      c10::ScalarType type,
      const BackendDevice& device);
  Value GetIrValueForScalar(
      const at::Scalar& value,
      const BackendDevice& device);

  // TODO: even though this API is currently used **only** in codegen to
  // generate real scalar IR values vs scalar tensors, we would like to
  // use it in other cases where `GetIrValueForXXXScalar` is used, as well.
  // In order to do that, we need to untangle the cases where we don't need
  // `expand` and where we don't expect a scalar tensor.
  Value GetIrValueForScalarFromCodegen(
      const at::Scalar& value,
      const BackendDevice& device);
  Value GetIrValueForExpandedScalar(
      const at::Scalar& value,
      const Shape& shape,
      const BackendDevice& device);

  // Entry stored in the compilation cache, keyed by graph hash.
  struct CachedComputation {
    explicit CachedComputation(ComputationPtr computation)
        : computation(std::move(computation)) {}

    ComputationPtr computation;
  };

  using ComputationCache = Cache<hash_t, CachedComputation, HashReducer>;

  ComputationCache* GetComputationCache();

  // Hash identifying the pending graph attached to the given tensors.
  hash_t GetGraphHash(const std::vector<LazyTensorPtr>& tensors);

 protected:
  // TODO(alanwaketan): Revisit if all of them need to be accessible to
  // derived classes.

  struct SyncTensorsConfig {
    // Whether we want to force data on the target tensors (hence trimming
    // the IR graph above them).
    bool force_ltc_data = true;
    // Whether when setting the data, the other properties of the tensor
    // state should be reset.
    bool sync_ltc_data = true;
  };

  // State gathered while collecting the tensors participating in a sync.
  struct SyncTensorCollection {
    SyncTensorCollection() : hash(0) {}

    SyncTensorsConfig config;
    std::vector<size_t> indices;
    hash_t hash;
    std::vector<ExceptionCleanup> unlocker;
    BackendDevice device;
  };

  // Result of a post-order traversal of the pending IR graph, including the
  // parameters (device data) discovered along the way.
  struct PostOrderData {
    std::vector<const Node*> post_order;
    Util::EmissionMap emission_map;
    std::vector<BackendDataPtr> parameters_data;
    std::vector<size_t> parameter_sequence;
  };

  // Locking:
  // We perform two kinds of operations of tensors, synchronous and
  // asynchronous. The ApplyPendingGraph() are synchronous, as we need the
  // device data result immediately. Before the synchronous operations can
  // start, they need to wait that the pending asynchronous operations have
  // completed. Synchronous operations do not hold device locks, since they are
  // strictly sequential, dictated by the PyTorch execution order. The
  // SyncTensorsGraph() is asynchronous, and returns immediately after having
  // scheduled the asynchronous operation. While executing, the asynchronous
  // operations will hold locks on all the participating devices (in most
  // common cases there will be only one device).
  // Since asynchronous operations capture device locks, only one asynchronous
  // operation can execute at the same time, on a given device. Tensor
  // operations which send data to device do not need to hold any device locks
  // while doing so. Only operations which _use_ device data (computations, and
  // transfer from server) need to wait for asynchronous operations to complete
  // (barrier).

  class DeviceLocker {
   public:
    explicit DeviceLocker(BackendDevice device) : device_(std::move(device)) {}

    const BackendDevice& device() const {
      return device_;
    }

    void Lock();
    // Releases the lock; a non-null exptr is re-thrown by the next Barrier().
    void Unlock(std::exception_ptr exptr);
    void Barrier();

   private:
    void CheckResetException();

    BackendDevice device_;
    std::mutex mutex_;
    std::condition_variable cv_;
    bool locked_ = false;
    std::exception_ptr exptr_;
  };

  // Process-wide registry of per-device lockers.
  class DeviceLockerArena {
   public:
    static DeviceLockerArena* Get();

    std::shared_ptr<DeviceLocker> GetLocker(const BackendDevice& device);

    void DeviceBarrier(const BackendDevice& device);

    // Use a set to impose an order on the device locking sequence (ABBA
    // prevention).
    std::vector<ExceptionCleanup> LockDevices(
        const std::set<BackendDevice>& devices);

   private:
    ExceptionCleanup LockDevice(const BackendDevice& device);

    std::mutex mutex_;
    std::map<BackendDevice, std::shared_ptr<DeviceLocker>> lockers_;
  };

  // Per-device cache mapping at::Tensor (and scalars) to uploaded backend
  // device data.
  class DataCacheArena {
   public:
    static DataCacheArena* Get();

    BackendDataPtr GetDeviceData(
        const at::Tensor& tensor,
        const BackendDevice& device);

    BackendDataPtr GetDeviceData(
        const at::Scalar& value,
        at::ScalarType scalar_type,
        const BackendDevice& device);

   private:
    struct TensorHasher {
      size_t operator()(const at::Tensor& tensor) const;
    };
    struct TensorComparer {
      bool operator()(const at::Tensor& tensor1, const at::Tensor& tensor2)
          const;
    };

    explicit DataCacheArena(size_t max_cache_size);

    using DataCache =
        Cache<at::Tensor, BackendData, TensorHasher, TensorComparer>;

    DataCache* GetDataCache(const BackendDevice& device);

    size_t max_cache_size_ = 0;
    std::mutex mutex_;
    std::map<BackendDevice, std::unique_ptr<DataCache>> device_caches_;
  };

  // The DeviceContextArena holds per device live information and statistics,
  // among which the lazy tensors which are currently alive in the system. This
  // is used to create computation "barriers" in order to flush pending
  // operations and ensure the same computations are created during the
  // training loops.
  // TODO(alanwaketan): Add a registry such that we don't need to make all
  // related methods virtual.
  class DeviceContextArena {
   protected:
    struct DeviceContext {
      std::mutex lock;
      std::map<int64_t, std::weak_ptr<LazyTensor::Data>> tensors_data;
      uint64_t seed = 101;
      uint64_t running_seed = 101;
      Value seed_ir_value;
    };

   public:
    static DeviceContextArena* Get();
    virtual ~DeviceContextArena() = default;

    void RegisterTensor(std::shared_ptr<LazyTensor::Data> data);
    void UnregisterTensor(LazyTensor::Data* data);

    std::vector<LazyTensorPtr> GetLiveTensors(const BackendDevice* device);

    // Overriding it allows derived classes to use their own IRs for Value.
    virtual Value GetRngSeed(const BackendDevice& device);
    uint64_t GetRunningSeed(const BackendDevice& device);
    void SetRngSeed(const BackendDevice& device, uint64_t seed);

    void MarkStep(const BackendDevice& device);

    std::vector<BackendDevice> GetActiveDevices();

   protected:
    DeviceContext* GetDeviceContext(const BackendDevice& device);

    void ForAllDeviceContexts(
        const std::function<void(DeviceContext*)>& fn,
        const BackendDevice* device);

    // Overriding it allows derived classes to use their own conversions.
    virtual Value IrValueFromScalar(
        const at::Scalar& value,
        at::ScalarType scalar_type,
        const BackendDevice& device);

   private:
    std::vector<DeviceContext*> GetAllDeviceContexts();

    std::mutex lock_;
    std::map<BackendDevice, DeviceContext*> device_contexts_;
  };

  // Tracks one in-flight asynchronous execution: the device locks it holds
  // (unlocker), its input parameters and its output tensors data.
  struct Async {
    Async(
        SyncTensorCollection* coll,
        std::vector<BackendDataPtr> parameters_data,
        std::vector<BackendDataPtr> tensors_data,
        ComputationCache::TypePtr cached_computation);
    virtual ~Async() = default;

    void Wait();

    MultiWait mwait;
    std::vector<size_t> indices;
    std::vector<ExceptionCleanup> unlocker;
    std::vector<BackendDataPtr> parameters_data;
    BackendDevice device;
    ComputationCache::TypePtr cached_computation;
    std::vector<BackendDataPtr> tensors_data;
  };

  // Resets the counter advanced by IncTrimCounter().
  void ResetTrimCounter() const;

  // Waits for this SyncTensorCollection's device barrier and acquire the lock.
  virtual void TensorCollectionBarrier(SyncTensorCollection* coll);

  // One can override to insert your own profiler.
  virtual PostOrderData RunPostOrder(
      const std::vector<Value>& ir_values,
      SyncTensorCollection* coll);

 private:
  // Output of Compile(): the compiled computation plus its parameters.
  struct CompilationResult {
    BackendDevice device;
    size_t emitted_nodes = 0;
    ComputationPtr computation;
    std::vector<BackendDataPtr> parameters_data;
  };

  virtual bool ShouldSyncTensor(const LazyTensorPtr& tensor) const;

  SyncTensorCollection CollectSyncTensors(
      const std::vector<LazyTensorPtr>& tensors,
      const SyncTensorsConfig& config);

  std::vector<Value> CollectRoots(
      const std::vector<LazyTensorPtr>& tensors,
      c10::ArrayRef<size_t> indices);

  std::vector<BackendDataPtr> SetTensorData(
      std::vector<LazyTensorPtr>* tensors,
      const SyncTensorsConfig& config,
      c10::ArrayRef<size_t> indices,
      const std::vector<torch::lazy::BackendDataPtr>& tensor_data_vec);

  void ExtractIRAndPrepareTensorData(
      std::vector<LazyTensorPtr>* tensors,
      const SyncTensorsConfig& config,
      c10::ArrayRef<size_t> indices,
      std::vector<Value>& ir_values,
      std::vector<BackendDataPtr>& tensor_data_vec);

  std::shared_ptr<Async> TryRunCachedSync(
      std::vector<LazyTensorPtr>* tensors,
      SyncTensorCollection* coll,
      PostOrderData* po_data,
      const std::vector<BackendDataPtr>& tensor_data_vec);

  CompilationResult Compile(
      const std::vector<LazyTensorPtr>& tensors,
      c10::ArrayRef<std::string> devices,
      const SyncTensorCollection& coll,
      PostOrderData* po_data,
      const std::vector<Value>& ir_values);

  ComputationCache::TypePtr LookupCachedCompile(const hash_t& hash);

  std::shared_ptr<Async> SyncTensorsGraphInternal(
      std::vector<LazyTensorPtr>* tensors,
      c10::ArrayRef<std::string> devices,
      const SyncTensorsConfig& config);

  // Schedules the execution of a sync tensors operation in background. The
  // asynchronous operation will hold the device locks by capturing the ones
  // present within the coll structure.
  std::shared_ptr<Async> ScheduleSyncTensorsGraph(
      SyncTensorCollection* coll,
      std::vector<BackendDataPtr> parameters_data,
      std::vector<BackendDataPtr> tensors_data,
      ComputationCache::TypePtr cached_computation);

  std::shared_ptr<Async> ScheduleSyncTensorsGraph(
      std::vector<LazyTensorPtr>* tensors,
      SyncTensorCollection* coll,
      std::vector<BackendDataPtr> parameters_data,
      ComputationCache::TypePtr cached_computation,
      const std::vector<BackendDataPtr>& tensor_data_vec);

  std::vector<at::Tensor> GetTensorsFused(std::vector<LazyTensorPtr>* tensors);

  std::vector<at::Tensor> FetchTensors(
      std::vector<LazyTensorPtr>* tensors,
      c10::ArrayRef<BackendDataPtr> tensors_data,
      const std::vector<size_t>* indices);

  // Gathers the device data for all the input tensors, after an
  // asynchronous operation.
  std::vector<BackendDataPtr> GatherTensorsData(
      const std::vector<LazyTensorPtr>& tensors,
      c10::ArrayRef<size_t> indices,
      c10::ArrayRef<BackendDataPtr> tensors_data);
};
|
| 424 |
+
|
| 425 |
+
} // namespace lazy
|
| 426 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/arithmetic_ir_ops.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/lazy/core/ir.h>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace lazy {
|
| 7 |
+
|
| 8 |
+
TORCH_API NodePtr operator+(const Value& node1, const Value& node2);
|
| 9 |
+
TORCH_API NodePtr operator-(const Value& node1, const Value& node2);
|
| 10 |
+
TORCH_API NodePtr operator*(const Value& node1, const Value& node2);
|
| 11 |
+
TORCH_API NodePtr operator/(const Value& node1, const Value& node2);
|
| 12 |
+
|
| 13 |
+
} // namespace lazy
|
| 14 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h
ADDED
|
@@ -0,0 +1,259 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/SymNodeImpl.h>
|
| 4 |
+
#include <c10/util/intrusive_ptr.h>
|
| 5 |
+
#include <torch/csrc/lazy/backend/backend_data.h>
|
| 6 |
+
#include <torch/csrc/lazy/backend/backend_device.h>
|
| 7 |
+
#include <torch/csrc/lazy/core/ir.h>
|
| 8 |
+
#include <torch/csrc/lazy/core/util.h>
|
| 9 |
+
|
| 10 |
+
namespace torch {
|
| 11 |
+
namespace lazy {
|
| 12 |
+
|
| 13 |
+
class TORCH_API SymNodeImpl : public c10::SymNodeImpl {
|
| 14 |
+
public:
|
| 15 |
+
SymNodeImpl(NodePtr ptr) : node_(std::move(ptr)){};
|
| 16 |
+
NodePtr node_;
|
| 17 |
+
};
|
| 18 |
+
|
| 19 |
+
class LazyTensor;
|
| 20 |
+
using LazyTensorPtr = c10::intrusive_ptr<LazyTensor>;
|
| 21 |
+
|
| 22 |
+
class TORCH_API LazyTensor : public c10::intrusive_ptr_target {
|
| 23 |
+
public:
|
| 24 |
+
// This is the core lazy tensor data structure where all the tensor data is
|
| 25 |
+
// held. The lazy tensor is nothing more than a shared pointer to a Data
|
| 26 |
+
// object.
|
| 27 |
+
struct Data {
|
| 28 |
+
Data(BackendDataPtr handle, BackendDevice device)
|
| 29 |
+
: handle(std::move(handle)),
|
| 30 |
+
device(std::move(device)),
|
| 31 |
+
unique_id(GetNextTensorId()) {}
|
| 32 |
+
Data(Value ir_value, BackendDevice device)
|
| 33 |
+
: ir_value(std::move(ir_value)),
|
| 34 |
+
device(std::move(device)),
|
| 35 |
+
unique_id(GetNextTensorId()) {}
|
| 36 |
+
Data(at::Tensor tensor_data, BackendDevice device)
|
| 37 |
+
: tensor_data(std::move(tensor_data)),
|
| 38 |
+
device(std::move(device)),
|
| 39 |
+
unique_id(GetNextTensorId()) {}
|
| 40 |
+
// TODO(alanwaketan): Remove this ctor. This is a
|
| 41 |
+
// temporary ctor to ease XLA LTC migration. It depends on
|
| 42 |
+
// XLA's Functionalization integration.
|
| 43 |
+
Data(BackendDevice device)
|
| 44 |
+
: device(std::move(device)), unique_id(GetNextTensorId()) {}
|
| 45 |
+
|
| 46 |
+
virtual ~Data();
|
| 47 |
+
|
| 48 |
+
BackendDataPtr handle;
|
| 49 |
+
Value ir_value;
|
| 50 |
+
c10::optional<at::Tensor> tensor_data;
|
| 51 |
+
const BackendDevice device;
|
| 52 |
+
const int64_t unique_id = 0;
|
| 53 |
+
size_t generation = 1;
|
| 54 |
+
};
|
| 55 |
+
|
| 56 |
+
static LazyTensorPtr Create(
|
| 57 |
+
const at::Tensor& tensor,
|
| 58 |
+
const BackendDevice& device);
|
| 59 |
+
static LazyTensorPtr Create(Value ir_value, const BackendDevice& device);
|
| 60 |
+
static LazyTensorPtr Create(BackendDataPtr handle);
|
| 61 |
+
static LazyTensorPtr Create(std::shared_ptr<Data> data);
|
| 62 |
+
|
| 63 |
+
// The default ctor previously created a null LazyTensor (one with no 'data'
|
| 64 |
+
// obj). Creating a null LazyTensor is no longer possible, since the same can
|
| 65 |
+
// be achieved by creating a null LazyTensorPtr and it is way too confusing to
|
| 66 |
+
// have to check both lazy_tensor_ptr && *lazy_tensor_ptr, so everywhere that
|
| 67 |
+
// used to rely on a LazyTensor obj with a null Data can now rely on a null
|
| 68 |
+
// LazyTensorPtr instead.
|
| 69 |
+
LazyTensor() = delete;
|
| 70 |
+
LazyTensor(const LazyTensor&) = default;
|
| 71 |
+
LazyTensor(LazyTensor&&) noexcept = default;
|
| 72 |
+
|
| 73 |
+
~LazyTensor() override = default;
|
| 74 |
+
|
| 75 |
+
size_t generation() const {
|
| 76 |
+
return data()->generation;
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
// Override it to use your own Shape.
|
| 80 |
+
virtual int64_t size(int64_t dim) const;
|
| 81 |
+
|
| 82 |
+
// Override it to use your own graph executor.
|
| 83 |
+
virtual at::Tensor ToTensor(bool detached);
|
| 84 |
+
|
| 85 |
+
void ShallowCopyTo(LazyTensorPtr dest) const;
|
| 86 |
+
|
| 87 |
+
// Assigns the tensor value to the lazy tensor.
|
| 88 |
+
void SetTensor(at::Tensor tensor);
|
| 89 |
+
|
| 90 |
+
void UpdateFromTensor(at::Tensor tensor, bool sync);
|
| 91 |
+
void UpdateFromTensorOut(at::Tensor tensor);
|
| 92 |
+
void UpdateFromTensorOut(const LazyTensorPtr& tensor);
|
| 93 |
+
|
| 94 |
+
const std::shared_ptr<Data>& data() const;
|
| 95 |
+
|
| 96 |
+
// Override it to use your own type conversion.
|
| 97 |
+
virtual at::ScalarType dtype() const;
|
| 98 |
+
|
| 99 |
+
MaybeRef<Shape> shape() const;
|
| 100 |
+
|
| 101 |
+
const BackendDevice& GetDevice() const;
|
| 102 |
+
int64_t GetUniqueId() const;
|
| 103 |
+
|
| 104 |
+
// Fetches the data behind the tensor. If the tensor has a graph defining
|
| 105 |
+
// its current value, executes the graph and fetches the data result.
|
| 106 |
+
BackendDataPtr GetDataHandle();
|
| 107 |
+
|
| 108 |
+
// Fetches the current value of the data, which can be missing (nullptr)
|
| 109 |
+
// in case the tensor has a graph defining its current value,
|
| 110 |
+
BackendDataPtr CurrentDataHandle() const;
|
| 111 |
+
|
| 112 |
+
void SetDataHandle(BackendDataPtr handle);
|
| 113 |
+
void SetDataHandle(BackendDataPtr handle, bool sync);
|
| 114 |
+
|
| 115 |
+
// Retrieves the current IR Node, or nullptr in case no active IR Node is
|
| 116 |
+
// available.
|
| 117 |
+
Value CurrentIrValue() const;
|
| 118 |
+
|
| 119 |
+
// Retrieves the IR Node representing this LazyTensor. One will be created if
|
| 120 |
+
// missing. Note that although this is a const API, it actually changes the
|
| 121 |
+
// internal state ofthe object.
|
| 122 |
+
Value GetIrValue() const;
|
| 123 |
+
|
| 124 |
+
void SetIrValue(Value ir_value);
|
| 125 |
+
void SetInPlaceIrValue(Value ir_value);
|
| 126 |
+
|
| 127 |
+
c10::optional<at::Tensor> CurrentTensorData() const;
|
| 128 |
+
|
| 129 |
+
std::vector<LazyTensorPtr> MakeOutputTensors(NodePtr node) const;
|
| 130 |
+
|
| 131 |
+
LazyTensorPtr CopyTensorToDevice(const BackendDevice& device);
|
| 132 |
+
|
| 133 |
+
// Applies the queue of operations in preparation for using the data.
|
| 134 |
+
// Override it to use your own graph executor.
|
| 135 |
+
virtual void ApplyPendingGraph();
|
| 136 |
+
|
| 137 |
+
// Override it to set extra information.
|
| 138 |
+
virtual void AssignIrValue(Value ir_value) const;
|
| 139 |
+
|
| 140 |
+
protected:
|
| 141 |
+
explicit LazyTensor(std::shared_ptr<Data> data);
|
| 142 |
+
|
| 143 |
+
void SetTensorData(at::Tensor tensor_data);
|
| 144 |
+
|
| 145 |
+
// We build a graph accumulating operations, but at a given point we
|
| 146 |
+
// need to force a rendering, otherwise the graph can grow without control.
|
| 147 |
+
// Think:
|
| 148 |
+
// for i in range(0, 100000):
|
| 149 |
+
// a = a + b
|
| 150 |
+
void TryLimitGraphSize();
|
| 151 |
+
|
| 152 |
+
// Override it to instantiate your own data.
|
| 153 |
+
virtual Value GetIrValueForTensor(
|
| 154 |
+
const at::Tensor& tensor,
|
| 155 |
+
const BackendDevice& device) const;
|
| 156 |
+
|
| 157 |
+
Value CreateTensorNode(BackendDataPtr data, bool read_only) const;
|
| 158 |
+
|
| 159 |
+
private:
|
| 160 |
+
LazyTensor(const at::Tensor& tensor, const BackendDevice& device);
|
| 161 |
+
LazyTensor(Value ir_value, const BackendDevice& device);
|
| 162 |
+
explicit LazyTensor(BackendDataPtr handle);
|
| 163 |
+
|
| 164 |
+
static int64_t GetNextTensorId();
|
| 165 |
+
|
| 166 |
+
std::shared_ptr<Data> data_;
|
| 167 |
+
};
|
| 168 |
+
|
| 169 |
+
// Utils to convert at::Tensor to LazyTensor, and vice versa.
|
| 170 |
+
|
| 171 |
+
// Section 0: c10::Tensorlist ==> lazy::TensorList
|
| 172 |
+
// note: GetTensorList is not totally parallel to GetLtcTensor; A TensorList
|
| 173 |
+
// skips
|
| 174 |
+
// the LazyTensor wrappers, assuming that the list of underlying IR nodes
|
| 175 |
+
// is actually more useful for downstream computations. TBD.
|
| 176 |
+
TORCH_API torch::lazy::Value GetTensorList(at::ITensorListRef tensors);
|
| 177 |
+
|
| 178 |
+
// Section 1: at::Tensor => LazyTensor.
|
| 179 |
+
// Extracts the LazyTensor out of an at::Tensor. Returns a null LazyTensor
|
| 180 |
+
// if the tensor is not a lazy tensor.
|
| 181 |
+
TORCH_API LazyTensorPtr TryGetLtcTensor(const at::Tensor& tensor);
|
| 182 |
+
|
| 183 |
+
// Extracts the LazyTensor out of an at::Tensor. Throws an exception
|
| 184 |
+
// if the tensor is not a lazy tensor.
|
| 185 |
+
TORCH_API LazyTensorPtr GetLtcTensor(const at::Tensor& tensor);
|
| 186 |
+
|
| 187 |
+
// Same as above, applied to a list of tensors.
|
| 188 |
+
TORCH_API std::vector<LazyTensorPtr> GetLtcTensors(
|
| 189 |
+
c10::ArrayRef<at::Tensor> tensors);
|
| 190 |
+
|
| 191 |
+
// If tensor is a lazy tensor type, returns the LazyTensor embedded within it,
|
| 192 |
+
// otherwise creates a new lazy tensor type with tensor as data.
|
| 193 |
+
TORCH_API LazyTensorPtr GetOrCreateLtcTensor(
|
| 194 |
+
const c10::optional<at::Tensor>& tensor,
|
| 195 |
+
const BackendDevice& device);
|
| 196 |
+
|
| 197 |
+
TORCH_API LazyTensorPtr GetLtcTensorOrCreateForWrappedNumber(
|
| 198 |
+
const at::Tensor& tensor,
|
| 199 |
+
const BackendDevice& device);
|
| 200 |
+
|
| 201 |
+
// Section 2: LazyTensor => at::Tensor.
|
| 202 |
+
// Creates an ATen tensor from an LazyTensor.
|
| 203 |
+
TORCH_API at::Tensor CreateAtenFromLtcTensor(const LazyTensorPtr& ltc_tensor);
|
| 204 |
+
TORCH_API at::Tensor CreateAtenFromLtcTensor(LazyTensor&& ltc_tensor);
|
| 205 |
+
|
| 206 |
+
// Note [Lazy Tensor Functionalization]
|
| 207 |
+
// The functionalization pass is implemented by wrapping all TensorImpl
|
| 208 |
+
// objects in C++ with an extra FunctionalTensorWrapper object,
|
| 209 |
+
// that knows how to perform functionalization
|
| 210 |
+
//
|
| 211 |
+
// Certain functions in the aten API serve as entry/exit points for
|
| 212 |
+
// functionalization, where we need to perform the wrapping/unwrapping:
|
| 213 |
+
// - aten::to.device
|
| 214 |
+
// - aten::empty
|
| 215 |
+
|
| 216 |
+
// Given a non-lazy tensor, this function creates a lazy tensor on the specified
|
| 217 |
+
// (lazy) device. The functionalize_output determines whether or not we should
|
| 218 |
+
// wrap the output in a "functional wrapper".
|
| 219 |
+
//
|
| 220 |
+
// How do you know whether to pass true/false for functionalize_output?
|
| 221 |
+
//
|
| 222 |
+
// Case 1: nonlazy -> lazy
|
| 223 |
+
// If you're implementing a function that takes in nonlazy tensors and returns
|
| 224 |
+
// lazy tensors, then you should think of that function as an "entrypoint" to
|
| 225 |
+
// functionalization, and use functionalize_output=true Examples include:
|
| 226 |
+
// - factory functions (the LTC kernel for at::empty)
|
| 227 |
+
// - CPU -> Lazy device converions (the LTC kernel for at::to_device)
|
| 228 |
+
//
|
| 229 |
+
// Case 2: lazy -> lazy
|
| 230 |
+
// If you're implementing a function that takes in lazy tensors and returns
|
| 231 |
+
// lazy tensors,
|
| 232 |
+
// **but** requires creating lazy tensors internally,
|
| 233 |
+
// then you can assume that the current function is running inside of some
|
| 234 |
+
// outer context where functionalization is already running, that will take
|
| 235 |
+
// care of doing the wrapping for you, and use functionalize_output=true
|
| 236 |
+
// Examples include:
|
| 237 |
+
// - CPU fallback (takes in lazy tensors, converts to cpu, calls kernel,
|
| 238 |
+
// converts returns back to lazy tensors).
|
| 239 |
+
TORCH_API at::Tensor to_lazy_tensor(
|
| 240 |
+
const at::Tensor& self,
|
| 241 |
+
const c10::TensorOptions& options,
|
| 242 |
+
at::Device device,
|
| 243 |
+
bool non_blocking,
|
| 244 |
+
bool functionalize_output);
|
| 245 |
+
|
| 246 |
+
template <size_t... Indices>
|
| 247 |
+
auto TupleAtenFromLtcTensorsImpl(
|
| 248 |
+
const std::vector<LazyTensorPtr>& tensors,
|
| 249 |
+
std::index_sequence<Indices...>) {
|
| 250 |
+
return std::make_tuple(CreateAtenFromLtcTensor(tensors[Indices])...);
|
| 251 |
+
}
|
| 252 |
+
|
| 253 |
+
template <size_t N>
|
| 254 |
+
auto TupleAtenFromLtcTensors(const std::vector<LazyTensorPtr>& tensors) {
|
| 255 |
+
return TupleAtenFromLtcTensorsImpl(tensors, std::make_index_sequence<N>{});
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
} // namespace lazy
|
| 259 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_util.h
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/lazy/backend/backend_interface.h>
|
| 4 |
+
#include <torch/csrc/lazy/core/shape.h>
|
| 5 |
+
|
| 6 |
+
#include <ATen/FunctionalTensorWrapper.h>
|
| 7 |
+
|
| 8 |
+
#include <string>
|
| 9 |
+
#include <vector>
|
| 10 |
+
|
| 11 |
+
namespace torch {
|
| 12 |
+
namespace lazy {
|
| 13 |
+
|
| 14 |
+
TORCH_API std::vector<int64_t> ComputeArrayStrides(
|
| 15 |
+
c10::ArrayRef<int64_t> sizes);
|
| 16 |
+
|
| 17 |
+
TORCH_API std::vector<at::Tensor> DataHandlesToTensors(
|
| 18 |
+
c10::ArrayRef<BackendDataPtr> data_handles,
|
| 19 |
+
at::ScalarType dest_element_type);
|
| 20 |
+
|
| 21 |
+
// Uploads an ATEN tensor data to the device and fetches the corresponding
|
| 22 |
+
// device data handle.
|
| 23 |
+
TORCH_API BackendDataPtr
|
| 24 |
+
TensorToDataHandle(const at::Tensor& tensor, const BackendDevice& device);
|
| 25 |
+
|
| 26 |
+
// Retrieves the device data handles by parallel uploading data onto the
|
| 27 |
+
// corresponding devices.
|
| 28 |
+
TORCH_API std::vector<BackendDataPtr> CreateTensorsData(
|
| 29 |
+
const std::vector<at::Tensor>& tensors,
|
| 30 |
+
const std::vector<BackendDevice>& devices);
|
| 31 |
+
|
| 32 |
+
// Makes a deep copy of an ATEN tensor.
|
| 33 |
+
inline at::Tensor CopyTensor(const at::Tensor& ref) {
|
| 34 |
+
return ref.to(ref.options(), /*non_blocking=*/false, /*copy=*/true);
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
// Same as above, with an additional cast.
|
| 38 |
+
inline at::Tensor CopyTensor(
|
| 39 |
+
const at::Tensor& ref,
|
| 40 |
+
at::ScalarType dest_type,
|
| 41 |
+
bool copy = true) {
|
| 42 |
+
return ref.to(ref.options().dtype(dest_type), /*non_blocking=*/false, copy);
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
template <typename T, typename S>
|
| 46 |
+
T OptionalOr(const c10::optional<S>& value, T defval) {
|
| 47 |
+
return value ? static_cast<T>(*value) : defval;
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
// Unwraps tensor to target dtype if it's a wrapped number.
|
| 51 |
+
inline at::Tensor UnwrapNumber(const at::Tensor& tensor, at::ScalarType dtype) {
|
| 52 |
+
return tensor.unsafeGetTensorImpl()->is_wrapped_number() ? tensor.to(dtype)
|
| 53 |
+
: tensor;
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
template <typename T>
|
| 57 |
+
at::Scalar MakeIntScalar(T value) {
|
| 58 |
+
return at::Scalar(static_cast<int64_t>(value));
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
// Routing values to device data maximizes the changes for compilation cache
|
| 62 |
+
// hits, but it can prevent the compiler to perform optimizations. So tensor
|
| 63 |
+
// values which are within a given set, are routed to constant scalars if this
|
| 64 |
+
// API returns true.
|
| 65 |
+
TORCH_API bool IsSpecialScalar(const at::Scalar& value);
|
| 66 |
+
|
| 67 |
+
// Note: returns a reference instead of a fresh tensor to avoid refcount bumps.
|
| 68 |
+
inline const at::Tensor& maybe_unwrap_functional(const at::Tensor& tensor) {
|
| 69 |
+
if (at::functionalization::impl::isFunctionalTensor(tensor)) {
|
| 70 |
+
return at::functionalization::impl::unsafeGetFunctionalWrapper(tensor)
|
| 71 |
+
->value();
|
| 72 |
+
} else {
|
| 73 |
+
return tensor;
|
| 74 |
+
}
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
} // namespace lazy
|
| 78 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/thread_pool.h
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* This file is adapted from PyTorch/XLA
|
| 3 |
+
* https://github.com/pytorch/xla/blob/master/third_party/xla_client/metrics.h
|
| 4 |
+
*/
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <functional>
|
| 9 |
+
#include <memory>
|
| 10 |
+
#include <thread>
|
| 11 |
+
|
| 12 |
+
#include <c10/macros/Export.h>
|
| 13 |
+
|
| 14 |
+
namespace torch {
|
| 15 |
+
namespace lazy {
|
| 16 |
+
|
| 17 |
+
class TORCH_API Completion {
|
| 18 |
+
public:
|
| 19 |
+
class Data;
|
| 20 |
+
|
| 21 |
+
explicit Completion(std::shared_ptr<Data> data);
|
| 22 |
+
|
| 23 |
+
~Completion();
|
| 24 |
+
|
| 25 |
+
void Wait();
|
| 26 |
+
|
| 27 |
+
private:
|
| 28 |
+
std::shared_ptr<Data> data_;
|
| 29 |
+
};
|
| 30 |
+
|
| 31 |
+
// Schedules a closure which might wait for IO or other events/conditions.
|
| 32 |
+
TORCH_API void ScheduleIoClosure(std::function<void()> closure);
|
| 33 |
+
TORCH_API Completion
|
| 34 |
+
ScheduleIoClosureWithCompletion(std::function<void()> closure);
|
| 35 |
+
|
| 36 |
+
} // namespace lazy
|
| 37 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/util.h
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* Most of the utils in this file is adapted from PyTorch/XLA
|
| 3 |
+
* https://github.com/pytorch/xla/blob/master/third_party/xla_client/util.h
|
| 4 |
+
*/
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <exception>
|
| 9 |
+
#include <functional>
|
| 10 |
+
#include <vector>
|
| 11 |
+
|
| 12 |
+
#include <c10/util/Optional.h>
|
| 13 |
+
#include <c10/util/OptionalArrayRef.h>
|
| 14 |
+
|
| 15 |
+
namespace torch {
|
| 16 |
+
namespace lazy {
|
| 17 |
+
|
| 18 |
+
// Similar to c10::scope_exit but with a status.
|
| 19 |
+
// TODO(alanwaketan): Consolidate it with c10::scope_exit.
|
| 20 |
+
template <typename T>
|
| 21 |
+
class Cleanup {
|
| 22 |
+
public:
|
| 23 |
+
using StatusType = T;
|
| 24 |
+
|
| 25 |
+
explicit Cleanup(std::function<void(StatusType&&)>&& func)
|
| 26 |
+
: func_(std::move(func)) {}
|
| 27 |
+
Cleanup(Cleanup&& ref) noexcept
|
| 28 |
+
: func_(std::move(ref.func_)), status_(std::move(ref.status_)) {}
|
| 29 |
+
Cleanup(const Cleanup&) = delete;
|
| 30 |
+
|
| 31 |
+
~Cleanup() {
|
| 32 |
+
if (func_ != nullptr) {
|
| 33 |
+
func_(std::move(status_));
|
| 34 |
+
}
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
Cleanup& operator=(const Cleanup&) = delete;
|
| 38 |
+
|
| 39 |
+
Cleanup& operator=(Cleanup&& ref) noexcept {
|
| 40 |
+
if (this != &ref) {
|
| 41 |
+
func_ = std::move(ref.func_);
|
| 42 |
+
status_ = std::move(ref.status_);
|
| 43 |
+
}
|
| 44 |
+
return *this;
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
void Release() {
|
| 48 |
+
func_ = nullptr;
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
void SetStatus(StatusType&& status) {
|
| 52 |
+
status_ = std::move(status);
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
const StatusType& GetStatus() const {
|
| 56 |
+
return status_;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
private:
|
| 60 |
+
std::function<void(StatusType&&)> func_;
|
| 61 |
+
StatusType status_;
|
| 62 |
+
};
|
| 63 |
+
|
| 64 |
+
using ExceptionCleanup = Cleanup<std::exception_ptr>;
|
| 65 |
+
|
| 66 |
+
// Allows APIs which might return const references and values, to not be forced
|
| 67 |
+
// to return values in the signature.
|
| 68 |
+
// TODO(alanwaketan): This is clever, but is there really no std or c10
|
| 69 |
+
// supports? Needs more investigations.
|
| 70 |
+
template <typename T>
|
| 71 |
+
class MaybeRef {
|
| 72 |
+
public:
|
| 73 |
+
/* implicit */ MaybeRef(const T& ref) : ref_(ref) {}
|
| 74 |
+
/* implicit */ MaybeRef(T&& value)
|
| 75 |
+
: storage_(std::move(value)), ref_(*storage_) {}
|
| 76 |
+
|
| 77 |
+
const T& Get() const {
|
| 78 |
+
return ref_;
|
| 79 |
+
}
|
| 80 |
+
const T& operator*() const {
|
| 81 |
+
return Get();
|
| 82 |
+
}
|
| 83 |
+
operator const T&() const {
|
| 84 |
+
return Get();
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
bool IsStored() const {
|
| 88 |
+
return storage_.has_value();
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
private:
|
| 92 |
+
c10::optional<T> storage_;
|
| 93 |
+
const T& ref_;
|
| 94 |
+
};
|
| 95 |
+
|
| 96 |
+
template <typename T>
|
| 97 |
+
std::vector<T> Iota(size_t size, T init = 0, T incr = 1) {
|
| 98 |
+
std::vector<T> result(size);
|
| 99 |
+
T value = init;
|
| 100 |
+
for (size_t i = 0; i < size; ++i, value += incr) {
|
| 101 |
+
result[i] = value;
|
| 102 |
+
}
|
| 103 |
+
return result;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
template <typename T, typename S>
|
| 107 |
+
std::vector<T> ToVector(const S& input) {
|
| 108 |
+
return std::vector<T>(input.begin(), input.end());
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
template <typename T>
|
| 112 |
+
c10::optional<std::vector<T>> ToOptionalVector(
|
| 113 |
+
c10::OptionalArrayRef<T> arrayRef) {
|
| 114 |
+
if (arrayRef) {
|
| 115 |
+
return arrayRef->vec();
|
| 116 |
+
}
|
| 117 |
+
return c10::nullopt;
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
template <typename T>
|
| 121 |
+
typename std::underlying_type<T>::type GetEnumValue(T value) {
|
| 122 |
+
return static_cast<typename std::underlying_type<T>::type>(value);
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
} // namespace lazy
|
| 126 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/python/python_util.h
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/util/Optional.h>
|
| 3 |
+
#include <torch/csrc/Export.h>
|
| 4 |
+
#include <torch/csrc/lazy/core/ir_metadata.h>
|
| 5 |
+
#include <vector>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace lazy {
|
| 9 |
+
|
| 10 |
+
c10::optional<SourceLocation> TORCH_PYTHON_API GetPythonFrameTop();
|
| 11 |
+
|
| 12 |
+
std::vector<SourceLocation> TORCH_PYTHON_API GetPythonFrames();
|
| 13 |
+
|
| 14 |
+
} // namespace lazy
|
| 15 |
+
} // namespace torch
|