diff --git a/.gitattributes b/.gitattributes index d47de70f70bfc1f36f9c52d2d8795bcf34550d48..196264b7a2453c21df02e5d30dfa6696ce603926 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1685,3 +1685,5 @@ parrot/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64 vllm/lib/python3.10/site-packages/sympy/printing/__pycache__/latex.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/scipy/special/_gufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h new file mode 100644 index 0000000000000000000000000000000000000000..db9742e04021e6fa6942c540c28f4ca6ff90d5df --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h @@ -0,0 +1,13 @@ +#pragma once +#include +#include + +namespace at { +class TensorBase; +} + +namespace at::native { + +TORCH_API bool canUse32BitIndexMath(const at::TensorBase &t, int64_t max_elem=std::numeric_limits::max()); + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..23c9b77159a220656b8909c1cee16ed4721e5bd9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h @@ -0,0 +1,369 @@ 
+#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +#include +#include + +namespace at::native { +namespace { +// Check if tensor list has either a boolean tensor or a integer tensor +inline bool has_integral_tensor(TensorList tensors, const bool includeBool) { + return std::any_of( + tensors.begin(), tensors.end(), [&includeBool](const auto& t) { + return at::isIntegralType(t.scalar_type(), includeBool); + }); +} +// check if tensor list has bool tensors +inline bool has_bool_tensor(TensorList tensors) { + return std::any_of(tensors.begin(), tensors.end(), [](const auto& t) -> bool { + return t.scalar_type() == ScalarType::Bool; + }); +} + +// Check foreach API restrictions +// - Tensor lists must be non-empty. +// - All TensorLists and ScalarLists must have the same number of elements. +// - Corresponding tensors must have the same size. +inline void check_foreach_api_restrictions(TensorList tensors) { + TORCH_CHECK(!tensors.empty(), "Tensor list must have at least one tensor."); +} + +inline void check_foreach_api_restrictions( + TensorList tensors, + ArrayRef scalars) { + check_foreach_api_restrictions(tensors); + TORCH_CHECK( + tensors.size() == scalars.size(), + "Tensor list must have same number of elements as scalar list."); +} + +inline void check_foreach_api_restrictions( + TensorList tensors1, + TensorList tensors2) { + TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor."); + TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor."); + TORCH_CHECK( + tensors1.size() == tensors2.size(), + "Tensor lists must have the same number of tensors, got ", + tensors1.size(), + " and ", + tensors2.size()); +} + +inline void check_foreach_api_restrictions( + TensorList tensors1, + TensorList tensors2, + TensorList tensors3) { + TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor."); + 
TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor."); + TORCH_CHECK(!tensors3.empty(), "Tensor list must have at least one tensor."); + TORCH_CHECK( + tensors1.size() == tensors2.size(), + "Tensor lists must have the same number of tensors, got ", + tensors1.size(), + " and ", + tensors2.size()); + TORCH_CHECK( + tensors1.size() == tensors3.size(), + "Tensor lists must have the same number of tensors, got ", + tensors1.size(), + " and ", + tensors3.size()); +} + +inline void check_foreach_api_restrictions( + TensorList tensors1, + TensorList tensors2, + TensorList tensors3, + ArrayRef scalars) { + check_foreach_api_restrictions(tensors1, tensors2, tensors3); + TORCH_CHECK( + tensors1.size() == scalars.size(), + "Tensor list must have same number of elements as scalar list, got ", + tensors1.size(), + " and ", + scalars.size()); +} + +// Helper function called in check_fast_path_restrictions to check whether all +// corresponding tensors (aligning in index across the tensorLists) share the +// same device and dtype. +inline bool _check_tensors_share_device_and_dtype( + ArrayRef tensorLists) { + const auto expected_dtype = tensorLists[0][0].dtype(); + const auto expected_device = tensorLists[0][0].device(); + + auto is_tensor_okay = [&](const Tensor& tensor) { + return tensor.dtype() == expected_dtype && + tensor.device() == expected_device && tensor.layout() == at::kStrided && + tensor.is_non_overlapping_and_dense(); + }; + + for (const auto& tensorList : tensorLists) { + for (const auto& tensor : tensorList) { + if (!is_tensor_okay(tensor)) { + return false; + } + } + } + + return true; +} + +// Helper function called in check_fast_path_restrictions to check if +// corresponding tensors in tensor lists have the same sizes and strides. 
+inline bool _check_tensors_share_sizes_and_strides( + ArrayRef tensorLists) { + for (const auto i : c10::irange(1, tensorLists.size())) { + for (const auto j : c10::irange(tensorLists[0].size())) { + if (tensorLists[0][j].sizes() != tensorLists[i][j].sizes() || + tensorLists[0][j].strides() != tensorLists[i][j].strides()) { + return false; + } + } + } + + return true; +} + +// Helper function called in check_fast_path_restrictions to check whether +// all tensors type promote properly with the scalars in scalarList. This +// function assumes that _check_tensors_share_device_and_dtype has already been +// called so that all corresponding tensors in tensorLists have the same dtype. +// Then, it is sufficient to check the type promotion with just one tensorList. +inline bool _check_tensors_do_type_promotion_with_scalars( + TensorList tensorList, + ArrayRef scalarList = {}, + bool does_op_promote_integer_inputs_to_float = false) { + for (const auto i : c10::irange(tensorList.size())) { + // For division, integer inputs will result in float. + if (does_op_promote_integer_inputs_to_float) { + if (at::isIntegralType( + tensorList[i].scalar_type(), /*includeBool*/ true)) { + return false; + } + } + if (!scalarList.empty()) { + const auto& scalar = + scalarList.size() == 1 ? scalarList[0] : scalarList[i]; + const auto& tensor = tensorList[i]; + // note(mkozuki): This check might be responsible for + // `_foreach_add(bool_tensors, bool_tensors)` being pushed to slow path. + if (tensor.scalar_type() != at::native::result_type(scalar, tensor)) { + return false; + } + } + } + + return true; +} + +// To go via 'fast' path, several conditions must be satisfied +// - All tensors in all lists must have the same dtype. 
+// - All tensors must be on the same device +// - All tensors must have strided layout +// - All tensors must be non-overlapping and dense +// - Resulting tensor must have the same dtype as the input one + +// Please, make sure to call check_foreach_api_restrictions before calling this +// method. There is a set of preconditions that have to be satisfied. +inline bool check_fast_path_restrictions( + ArrayRef tensorLists, + ArrayRef scalarList = {}, + bool does_op_promote_integer_inputs_to_float = false) { + return _check_tensors_share_device_and_dtype(tensorLists) && + _check_tensors_share_sizes_and_strides(tensorLists) && + _check_tensors_do_type_promotion_with_scalars( + tensorLists[0], + scalarList, + does_op_promote_integer_inputs_to_float); +} + +inline std::vector convert_tensor_to_scalar_list( + const Tensor& scalarList_, + int64_t expect_length) { + std::vector scalarList; + TORCH_CHECK( + scalarList_.device() == c10::kCPU, + "Expected scalars to be on CPU, got ", + scalarList_.device(), + " instead."); + TORCH_CHECK( + scalarList_.is_contiguous(), "Expected scalars to be contiguous."); + TORCH_CHECK( + scalarList_.dim() == 1, + "Expected packed scalar Tensor to be of dimension 1. 
Got ", + scalarList_.dim(), + " instead."); + AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( + kComplexHalf, + kHalf, + kBool, + kBFloat16, + scalarList_.scalar_type(), + "convert_tensor_to_scalar_list", + [&]() { + const scalar_t* scalar_data = scalarList_.data_ptr(); + TORCH_CHECK( + (expect_length == scalarList_.size(0)), + "Expected length of scalars to match input of length ", + expect_length, + " but got ", + scalarList_.size(0), + " instead."); + for (int64_t i = 0; i < scalarList_.size(0); i++) { + scalarList.emplace_back(scalar_data[i]); + } + }); + return scalarList; +} + +inline bool can_use_fast_route( + ArrayRef tensorLists, + ArrayRef scalarList = {}, + bool does_op_promote_integer_inputs_to_float = false) { + return check_fast_path_restrictions( + tensorLists, scalarList, does_op_promote_integer_inputs_to_float); +} + +inline bool can_use_fast_route( + TensorList tensors1, + TensorList tensors2, + bool does_op_promote_integer_inputs_to_float = false) { + return can_use_fast_route( + {tensors1, tensors2}, {}, does_op_promote_integer_inputs_to_float); +} + +using DeviceDtypeKey = std::pair; +using IndicesT = std::vector; +using nested_optional_tensorvec_t = + std::vector>>; +using TensorsAndIndicesT = std::pair; +using FlatMap = std::unordered_map< + DeviceDtypeKey, + TensorsAndIndicesT, + ParamsHash>; + +inline FlatMap _group_tensors_by_first_tensors_device_and_dtype( + const nested_optional_tensorvec_t& nested_tensorlist, + const bool with_indices) { + FlatMap grouped_tensors_with_indices; + + TORCH_CHECK(!nested_tensorlist.empty()); + TORCH_CHECK(!nested_tensorlist[0].empty()); + const auto num_lists = nested_tensorlist.size(); + const auto num_tensors = nested_tensorlist[0].size(); + + TORCH_CHECK(std::all_of( + nested_tensorlist.cbegin(), + nested_tensorlist.cend(), + [&](const auto& tensorlist) -> bool { + // note(crcrpar): Allow empty tensorlists following + // ref: + // 
https://github.com/pytorch/pytorch/blob/85885301fd3c6adb8b9dc3cf7afadf6945566684/torch/utils/_foreach_utils.py#L21-L24 + return tensorlist.size() == num_tensors || tensorlist.size() == 0; + })); + + for (const auto& tensor_index : c10::irange(num_tensors)) { + const auto key = [&]() -> DeviceDtypeKey { + const auto t = nested_tensorlist[0][tensor_index]; + TORCH_CHECK( + t.has_value(), + "Tensors of the first list of nested Tensor lists are supposed to be defined but ", + "the ", + tensor_index, + "-th Tensor is not."); + return {t->device(), t->scalar_type()}; + }(); + TORCH_CHECK( + std::all_of( + nested_tensorlist.cbegin(), + nested_tensorlist.cend(), + [&](const auto& tensorlist) -> bool { + if (tensorlist.size() == 0) { + return true; + } + const auto& tensor = tensorlist[tensor_index]; + // note(crcrpar): Currently the scope of this function is + // optimizers so there could be `state_steps` and other scalars + // whose elements are float tensors no matter what the parameter's + // dtype is. + if (!tensor.has_value()) { + return true; + } else { + const auto s = tensor->scalar_type(); + const auto d = tensor->device(); + // Note: `step` or `state_step` is float32 by default. + if (key.first == d) { + return key.second == s || s == at::ScalarType::Float; + } else if (d.is_cpu()) { + // note(crcrpar): There are some test cases (e.g. + // TestOptim::test_adam) where state_steps are on CPU and the + // others are on CUDA. Currently a state_step Tensor has the + // dtype of float. 
+ return s == at::ScalarType::Float; + } else { + return false; + } + } + }), + "Tensors of the same index must be on the same device and the same dtype except `step` tensors that can be CPU and float32 notwithstanding"); + if (!grouped_tensors_with_indices.count(key)) { + grouped_tensors_with_indices.insert( + {key, + TensorsAndIndicesT{ + [&]() -> nested_optional_tensorvec_t { + nested_optional_tensorvec_t nested_tensorvec; + nested_tensorvec.reserve(num_lists); + for (const auto& i : c10::irange(num_lists)) { + std::vector> tensors; + if (!nested_tensorlist[i].empty()) { + // NB: num_tensors is the max possible length for any of + // the inner lists of tensor references. Reserving the max + // trades memory for perf. This should not have significant + // impact. + tensors.reserve(num_tensors); + } + nested_tensorvec.emplace_back(tensors); + } + return nested_tensorvec; + }(), + [&]() -> IndicesT { + if (!with_indices) { + return {}; + } else { + IndicesT indices; + indices.reserve(num_tensors); + return indices; + } + }()}}); + } + for (const auto& list_index : c10::irange(num_lists)) { + if (!nested_tensorlist[list_index].empty()) { + grouped_tensors_with_indices[key].first[list_index].emplace_back( + nested_tensorlist[list_index][tensor_index]); + } + } + if (with_indices) { + grouped_tensors_with_indices[key].second.emplace_back(tensor_index); + } + } + + return grouped_tensors_with_indices; +} + +} // namespace +} // namespace at::native diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/Histogram.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/Histogram.h new file mode 100644 index 0000000000000000000000000000000000000000..cd19fa4691ad04ea0b6e3aeea2cafe9eca907dd4 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/Histogram.h @@ -0,0 +1,16 @@ +#pragma once + +#include +#include + +namespace at::native { + +using histogramdd_fn = void(*)(const Tensor&, const 
c10::optional&, bool, Tensor&, const TensorList&); +using histogramdd_linear_fn = void(*)(const Tensor&, const c10::optional&, bool, Tensor&, const TensorList&, bool); +using histogram_select_outer_bin_edges_fn = void(*)(const Tensor& input, const int64_t N, std::vector &leftmost_edges, std::vector &rightmost_edges); + +DECLARE_DISPATCH(histogramdd_fn, histogramdd_stub); +DECLARE_DISPATCH(histogramdd_linear_fn, histogramdd_linear_stub); +DECLARE_DISPATCH(histogram_select_outer_bin_edges_fn, histogram_select_outer_bin_edges_stub); + +} // namespace at::native diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h new file mode 100644 index 0000000000000000000000000000000000000000..584d07aeca358b2164f97d705dbaf172868f8a94 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h @@ -0,0 +1,157 @@ +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include + +#include +#endif + +namespace at::native { +// This fallback should only be used for operations that are self inverse and have a corresponding tensor +// bit (internally implemented using DispatchKey) to maintain the state on tensor using tensor bit. +// Currently there are two tensor bits that trigger this fallback: conjugate bit and negative bit. +// Conjugate bit is set on a tensor when `.conj()` is called and neg bit is set on a tensor when `.conj().imag` is called. + +// NOTE: To use this fallback, `clone` and `copy_` should fully understand and be able to correctly handle the semantic of your math bit. 
+struct MathOpFallback { + MathOpFallback(DispatchKey key_, string op_name_) : key(key_), op_name(std::move(op_name_)) {} + virtual bool is_bit_set(const Tensor&) = 0; + void fallback_impl(const c10::OperatorHandle& op, DispatchKeySet dispatch_keys, torch::jit::Stack* stack) { + /* + Situations to handle: + 1. Out-of-place operation. Easy: materialize all inputs and + call it a day. + 2. Inplace operation. Desugar x.add_(2) into x.conj_().add_(2).conj_(). + Materialize other inputs as in (1). + 3. out= operation. Desugar add(x, 2, out=y) into y.copy_(add(x, 2)) + Materialize other inputs as in (1). + + It is important to be able to tell if we READ from an argument and if we + WRITE to an argument. Conservative approach is to assume that we always + READ from an argument, but in out= operations you can skip + conjugating inputs on entry that never get used. In the current schema we + can't easily tell if the operation is in in-place or out= operation. + + Note: + 1. Mutable tensorlists containing tensors whose math bit set to true are disallowed. + 2. Mutable tensors with math bit set to true are unconditionally cloned to ensure + correct behavior in the case when the mutable tensor shares memory with non mutable arguments. + + If we were to in-place resolve the math bit for mutable inputs, then the non-mutable inputs sharing partial or full memory + with these mutable inputs would read into wrong values in the following cases: + 1. Non mutable inputs have their math bit set to false. + 2. Math bit for mutable input(s) is resolved before the non mutable inputs (with bit set to true and sharing memory + with one or more mutable arg(s)) are cloned. + At the end, the final value of the mutable arguments from the stack are copied into the original input mutable tensor inputs. 
+ */ + const auto& arguments = op.schema().arguments(); + const auto num_arguments = arguments.size(); + const auto stack_start = stack->size() - num_arguments; + + c10::optional is_write; + for (const auto i : c10::irange(num_arguments)) { + // Three possible states: + // 1. alias_info has no value --> out-of-place operation + // 2. alias_info does have a value, alias_info->is_write=True --> in-place or out= operation + // 3. alias_info does have a value, alias_info->is_write=False --> view operation + const AliasInfo* alias_info = arguments[i].alias_info(); + if (alias_info != nullptr) { + if (is_write.has_value()) { + TORCH_CHECK(*is_write == alias_info->isWrite(), + "Unsupported operator for ", op_name, " fallback: ", op.schema().name(), + op_name, " fallback doesn't work for operators with a mix " + "mutable and non-mutable inputs that alias with outputs, " + "this must be implemented manually. " + "If you got this error on a core op, please report a bug to PyTorch."); + } else { + is_write = alias_info->isWrite(); + } + } + } + + if (is_write.has_value() && !*is_write) { + // We assume that view operators automatically handle the math bit + // correctly by propagating the dispatch key in key_set. + // This is not necessarily always right, so you should test these cases. 
+ op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack); + return; + } + + // Mutable inputs with math bit set to True and their clones + std::vector> mutable_inputs_with_their_clones; + for (const auto i : c10::irange(num_arguments)) { + auto& ivalue = (*stack)[stack_start + i]; + if (!(ivalue.isTensor() || ivalue.isTensorList())) { + continue; + } + const auto& argument = arguments[i]; + bool mut_arg = false; + if (argument.alias_info()) { + // Was already tested by is_write loop above + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(argument.alias_info()->isWrite()); + mut_arg = true; + } + if (ivalue.isTensor()) { + if (!is_bit_set(ivalue.toTensor())) { + continue; + } + auto tensor = std::move(ivalue).toTensor(); + auto resolved_tensor = at::clone(tensor); + if (mut_arg) { + TORCH_CHECK(mutable_inputs_with_their_clones.empty(), op_name, " fallback does not support operators with more than one mutable tensors with ", + op_name, "bit set to true."); + mutable_inputs_with_their_clones.emplace_back(std::move(tensor), resolved_tensor); + } + (*stack)[stack_start + i] = std::move(resolved_tensor); + } else if (ivalue.isTensorList()) { + auto tensors = std::move(ivalue).toTensorList(); + for(const auto j : c10::irange(tensors.size())) { + const auto& tensor = tensors[j]; + if (!is_bit_set(tensor)) { + continue; + } + TORCH_CHECK(!mut_arg, " fallback doesn't currently support mutable TensorLists with ", + op_name, " inputs. 
Please materialize all the ", op_name, " input tensor(s) in the mutable TensorList inputs before calling ", + op.schema().name()); + tensors[j] = at::clone(tensor); + } + (*stack)[stack_start + i] = std::move(tensors); + } + } + + op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack); + + TORCH_INTERNAL_ASSERT(mutable_inputs_with_their_clones.size() <= 1); + + for (std::pair mut_tensors: mutable_inputs_with_their_clones) { + auto& mutable_input = mut_tensors.first; + auto& cloned_mutable_input = mut_tensors.second; + auto& ivalue = (*stack)[stack_start]; + auto returned_output = std::move(ivalue).toTensor(); + + // sanity check to ensure that the tensor in stack aliases the cloned_mutable_input + TORCH_INTERNAL_ASSERT(cloned_mutable_input.is_same(returned_output)); + + // necessary for out= arg + at::native::resize_output(mutable_input, returned_output.sizes()); + + mutable_input.copy_(returned_output); + (*stack)[stack_start] = std::move(mutable_input); + } + } + + virtual ~MathOpFallback() = default; + + DispatchKey key; + string op_name; +}; + +} // namespace at::native diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h new file mode 100644 index 0000000000000000000000000000000000000000..6cd4dcde370522874311f43dbcdfca0e16bc5035 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include + +namespace at::native { + +using renorm_scale_factor_fn = void (*) (TensorIteratorBase& iter, double maxnorm); +DECLARE_DISPATCH(renorm_scale_factor_fn, renorm_scale_factor_stub); + +} // namespace at::native diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorCompare.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorCompare.h new file mode 100644 index 
0000000000000000000000000000000000000000..b4dfa689b1d216cb697076781935afb81a587fae --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorCompare.h @@ -0,0 +1,49 @@ +#pragma once + +#include + +namespace c10 { +class Scalar; +} + +namespace at { +class Tensor; +struct TensorIterator; +struct TensorIteratorBase; +} + +namespace at::native { + +using reduce_minmax_fn = + void (*)(Tensor&, Tensor&, const Tensor&, int64_t, bool); +using structured_reduce_minmax_fn = + void (*)(const Tensor&, const Tensor&, const Tensor&, int64_t, bool); + +DECLARE_DISPATCH(structured_reduce_minmax_fn, max_stub); +DECLARE_DISPATCH(structured_reduce_minmax_fn, min_stub); + +using where_fn = void (*)(TensorIterator &); +DECLARE_DISPATCH(where_fn, where_kernel); + +using is_infinity_op_fn = void (*)(TensorIteratorBase &); +DECLARE_DISPATCH(is_infinity_op_fn, isposinf_stub); +DECLARE_DISPATCH(is_infinity_op_fn, isneginf_stub); + +using mode_fn = void (*)(Tensor&, Tensor&, const Tensor&, int64_t, bool); +DECLARE_DISPATCH(mode_fn, mode_stub); + +using clamp_tensor_fn = void (*)(TensorIteratorBase &); +DECLARE_DISPATCH(clamp_tensor_fn, clamp_stub); + +namespace detail { + enum class ClampLimits {Min, Max, MinMax}; +} + +DECLARE_DISPATCH(void (*)(TensorIteratorBase &, const c10::Scalar&, const c10::Scalar&), clamp_scalar_stub); +DECLARE_DISPATCH(void (*)(TensorIteratorBase &, c10::Scalar), clamp_min_scalar_stub); +DECLARE_DISPATCH(void (*)(TensorIteratorBase &, c10::Scalar), clamp_max_scalar_stub); + +using isin_default_fn = void (*)(const Tensor&, const Tensor&, bool, const Tensor&); +DECLARE_DISPATCH(isin_default_fn, isin_default_stub); + +} // namespace at::native diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorIterator.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorIterator.h new file mode 100644 index 0000000000000000000000000000000000000000..e55d2a58d709926a24467a0056323096e0890fa9 
--- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorIterator.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorTransformations.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorTransformations.h new file mode 100644 index 0000000000000000000000000000000000000000..f69c27edb976a4157dca1b0e0a38d748cdef9848 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TensorTransformations.h @@ -0,0 +1,30 @@ +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +#include + +namespace at::native { + +static inline Tensor roll_common(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) { + TORCH_CHECK(!shifts.empty(), "`shifts` required"); + if (dims.empty() && shifts.size() == 1) { + auto flattened = self.contiguous().view(self.numel()); + return roll(flattened, shifts[0], 0).view(self.sizes()); + } + TORCH_CHECK( + shifts.size() == dims.size(), + "shifts and dimensions must align. 
shifts: ", shifts.size(), ", dims:", dims.size() + ); + AT_ASSERT(dims.size() > 1); + auto tail_shifts = shifts.slice(1); + auto tail_dims = dims.slice(1); + auto first_dim_rolled = roll(self, shifts[0], dims[0]); + return at::roll(first_dim_rolled, tail_shifts, tail_dims); +} + +} // namespace at::native diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TypeProperties.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TypeProperties.h new file mode 100644 index 0000000000000000000000000000000000000000..2d4845c758461c3435c83eaf7cafa3ddd6c9d784 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/TypeProperties.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include + +namespace at::native { + +struct ResultTypeState { + c10::ScalarType dimResult = ScalarType::Undefined; + c10::ScalarType wrappedResult = ScalarType::Undefined; + c10::ScalarType zeroResult = ScalarType::Undefined; +}; + +TORCH_API ResultTypeState update_result_type_state(const Tensor& tensor, const ResultTypeState& in_state); +TORCH_API ResultTypeState update_result_type_state(const Scalar& scalar, const ResultTypeState& in_state); +TORCH_API ScalarType result_type(const ResultTypeState& state); + +TORCH_API ScalarType result_type(ITensorListRef tensors); + +} // namespace at::native diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/layer_norm.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/layer_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..13fb1e4783d20bbbc68cfebc9cf609ba3274bb25 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/native/layer_norm.h @@ -0,0 +1,100 @@ +#pragma once + +#include +#include +#include + +namespace at::native { + +namespace { + +C10_ALWAYS_INLINE std::pair _check_layer_norm_inputs( + const Tensor& input, + IntArrayRef normalized_shape, + const Tensor& weight /* optional */, + 
const Tensor& bias /* optional */) { + + const int normalized_ndim = normalized_shape.size(); + TORCH_CHECK( + normalized_ndim >= 1, + "Expected normalized_shape to be at least 1-dimensional, i.e., ", + "containing at least one element, but got normalized_shape = ", + normalized_shape); + TORCH_CHECK( + !weight.defined() || weight.sizes().equals(normalized_shape), + "Expected weight to be of same shape as normalized_shape, but got ", + "weight of shape ", + weight.sizes(), + " and normalized_shape = ", + normalized_shape); + TORCH_CHECK( + !bias.defined() || bias.sizes().equals(normalized_shape), + "Expected bias to be of same shape as normalized_shape, but got ", + "bias of shape ", + bias.sizes(), + " and normalized_shape = ", + normalized_shape); + + const auto input_shape = input.sizes(); + const auto input_ndim = input.dim(); + + if (input_ndim < normalized_ndim || + !input_shape.slice(input_ndim - normalized_ndim) + .equals(normalized_shape)) { + std::stringstream ss; + ss << "Given normalized_shape=" << normalized_shape + << ", expected input with shape [*"; + for (auto size : normalized_shape) { + ss << ", " << size; + } + ss << "], but got input of size" << input_shape; + AT_ERROR(ss.str()); + } + + const int axis = input_ndim - normalized_ndim; + const int64_t M = + c10::multiply_integers(input_shape.cbegin(), input_shape.cbegin() + axis); + const int64_t N = + c10::multiply_integers(input_shape.cbegin() + axis, input_shape.cend()); + + return std::make_pair(M, N); +} + +} // namespace + +void layer_norm_cpu_out( + at::Tensor& out, + const at::Tensor& input, + const Tensor& gamma, + const Tensor& beta, + double eps, + int64_t M, + int64_t N); + +using forward_fn = void (*)( + const Tensor& /* X */, + const Tensor& /* gamma */, + const Tensor& /* beta */, + int64_t /* M */, + int64_t /* N */, + double /* eps */, + Tensor* /* Y */, + Tensor* /* mean */, + Tensor* /* rstd */); + +using backward_fn = void (*)( + const Tensor& /* dY */, + const Tensor& /* X 
*/, + const Tensor& /* mean */, + const Tensor& /* rstd */, + const Tensor& /* gamma */, + int64_t /* M */, + int64_t /* N */, + Tensor* /* dX */, + Tensor* /* dgamma */, + Tensor* /* dbeta */); + +DECLARE_DISPATCH(forward_fn, LayerNormKernel); +DECLARE_DISPATCH(backward_fn, LayerNormBackwardKernel); + +} // namespace at::native diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h new file mode 100644 index 0000000000000000000000000000000000000000..2efde9d5f2f2e6ee4a074389aa9dcec7c35459d2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/accumulate_grad.h @@ -0,0 +1,277 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +#include + +namespace torch { +namespace autograd { + +#define CHECK_RESULT(RESULT, VAR) \ + if (!(RESULT.is_sparse() || VAR.is_sparse() || RESULT.is_sparse_csr() || \ + VAR.is_sparse_csr())) { \ + if (!utils::obeys_layout_contract(RESULT, VAR)) { \ + TORCH_WARN_ONCE( \ + "grad and param do not obey the gradient layout contract. " \ + "This is not an error, but may impair performance.\n" \ + "grad.sizes() = ", \ + RESULT.sizes(), \ + ", strides() = ", \ + RESULT.strides(), \ + "\n", \ + "param.sizes() = ", \ + VAR.sizes(), \ + ", strides() = ", \ + VAR.strides()); \ + } \ + } + +struct TORCH_API AccumulateGrad : public Node { + explicit AccumulateGrad(Variable variable_); + + variable_list apply(variable_list&& grads) override; + + std::vector>& tensor_pre_hooks() noexcept + override { + // NB: Since the AccumulateGrad Node is only a weak ref from the Tensor, + // it can be destroyed even though the Tensor is still alive (contrary + // to all other Nodes). So we must lazily read the Tensor hooks here. 
+ return impl::hooks(variable); + } + + std::unique_ptr& tensor_post_acc_grad_hooks() noexcept + override { + // NB: Since the AccumulateGrad Node is only a weak ref from the Tensor, + // it can be destroyed even though the Tensor is still alive (contrary + // to all other Nodes). So we must lazily read the Tensor hooks here. + return impl::post_acc_grad_hooks(variable); + } + + // Given a variable with its current grad as variable_grad, accumulates + // new_grad into variable_grad if in place accumulation is possible. + // Otherwise, uses 'update_grad' to update the grad for the variable. + + // "Gradient Layout Contract" + // + // AccumulateGrad tries to stash strided (non-sparse) grads with memory layout + // (strides) such that variables and grads interact efficiently in later + // optimizer kernels, and grads interact efficiently with c10d::Reducer.cpp. + // + // Specifically, AccumulateGrad tries to ensure the following + // (cf torch/csrc/autograd/utils/grad_layout_contract.h): + // (1) if variable.is_non_overlapping_and_dense(), the stashed grad's + // strides match variable. + // (2) else, stashed grad is rowmajor contiguous. + // If variable's grad does not exist (!variable_grad.defined()) + // AccumulateGrad steals new_grad if it's stealable and obeys the contract + // already, otherwise it deep copies new_grad into an obedient clone. + // + // If variable's grad already exists (variable_grad.defined()), new_grad must + // be added to variable_grad. If we aren't setting up for double backward + // (!GradMode::is_enabled()), AccumulateGrad performs "variable_grad += + // new_grad" in-place, which keeps variable_grad's layout. We assume (hope) + // variable_grad was created obeying (1) or (2) at some point in the past. + // + // If we are setting up for double backward, AccumulateGrad updates the grad + // out-of-place via "variable_grad + new_grad." TensorIterator operator+ + // decides result's layout. 
Typically TensorIterator matches strides of the + // first arg, so we once again assume (hope) variable_grad was originally + // created obeying (1) or (2). + // + // AccumulateGrad does not enforce the contract with 100% certainty. Examples: + // - If a user manually permutes a param or its grad, then runs a fwd+bwd, + // variable_grad += new_grad keeps variable_grad's layout without + // rechecking the contract. + // - If TensorIterator changes its corner cases about operator+'s result + // (for example, giving more or less priority to channels_last inputs, see + // https://github.com/pytorch/pytorch/pull/37968) the result may not obey. + // + // Fortunately, if a given grad doesn't satisfy (1) or (2), the penalty is + // degraded performance in Reducer.cpp or optimizer kernels, not death by + // assert or silently bad numerics. + + // variable: the variable whose grad we're accumulating. + // variable_grad: the current grad for the variable. + // new_grad: new grad we want to accumulate for the variable. + // num_expected_refs: the number of refs we expect to hold internally + // such that it is safe to avoid cloning the grad + // if use_count() of the grad is less than or equal + // to this value (in addition to post_hooks). + // update_grad: Function that is used to update grad for the variable. + // The argument to the function is a Tensor which + // is used to set a new value for the grad. 
+ template + static void accumulateGrad( + const Variable& variable, + at::Tensor& variable_grad, + const at::Tensor& new_grad, + size_t num_expected_refs, + const T& update_grad) { + if (!variable_grad.defined()) { + if (!GradMode::is_enabled() && !new_grad.is_sparse() && + !new_grad.is_sparse_csr() && + !(variable.is_sparse_csr() && new_grad.layout() == at::kStrided) && + at::caching::adjusted_use_count(new_grad) <= num_expected_refs && + (new_grad.is_mkldnn() || + utils::obeys_layout_contract(new_grad, variable))) { + // we aren't setting up for double-backward + // not sparse + // no other user-visible tensor references new_grad + // new_grad obeys the "Gradient Layout Contract", there has a special + // case, For MKLDNN tensor, which is a opaque tensor, assuming it obeys + // layout_contract. Under these conditions, we can steal new_grad + // without a deep copy. + update_grad(new_grad.detach()); + } else if ( + !GradMode::is_enabled() && new_grad.is_sparse() && + new_grad._indices().is_contiguous() && + new_grad._values().is_contiguous() && + // Use count for indices and values should always be <=1 since the + // SparseTensor should be the only one holding a reference to these. + new_grad._indices().use_count() <= 1 && + new_grad._values().use_count() <= 1 && + new_grad.use_count() <= num_expected_refs) { + // Can't detach sparse tensor (since metadata changes are not allowed + // after detach), so just create a new one for the grad which is a + // shallow copy. We need a shallow copy so that modifying the original + // grad tensor doesn't modify the grad we accumulate. + // We only skip clone if indices and values themselves are contiguous + // for backward compatibility reasons. Since without this optimization, + // earlier we would clone the entire SparseTensor which cloned indices + // and values. + // For details see https://github.com/pytorch/pytorch/issues/34375. 
+ + // No scenario where we expect this to be true currently + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + !at::caching::is_cached_tensor(new_grad._indices()) && + !at::caching::is_cached_tensor(new_grad._values()) && + !at::caching::is_cached_tensor(new_grad)); + + update_grad(at::_sparse_coo_tensor_unsafe( + new_grad._indices(), + new_grad._values(), + new_grad.sizes(), + new_grad.options())); + } else { + if (new_grad.is_sparse() || new_grad.is_sparse_csr() || + new_grad.is_nested()) { + update_grad(new_grad.clone()); + } else { + if (new_grad.is_mkldnn()) { + update_grad(new_grad.clone()); + } else { + // Deep copies new_grad according to the "Gradient Layout Contract." + update_grad(utils::clone_obey_contract(new_grad, variable)); + } + } + } + } else if (!GradMode::is_enabled()) { + // This case is not strictly necessary, but it makes the first-order only + // case slightly more efficient. + if (variable_grad.is_sparse() && !new_grad.is_sparse()) { + // If `variable_grad` is sparse and `new_grad` is not sparse, their + // sum is not sparse, and we must change the TensorImpl type of + // `variable_grad` for it to store the result. However, changing the + // TensorImpl type of a tensor requires changing the tensor itself, and + // thus in this case we have to change the grad tensor. + auto result = new_grad + variable_grad; + CHECK_RESULT(result, variable); + update_grad(std::move(result)); + } else if (!at::inplaceIsVmapCompatible(variable_grad, new_grad)) { + // Ideally we'd perform an in-place operation to avoid changing + // the grad tensor. However, if that's impossible because the grads + // are vmap-incompatible (See NOTE: [vmap-incompatible in-place + // operations]), then we just add them out-of-place. + auto result = variable_grad + new_grad; + CHECK_RESULT(result, variable); + update_grad(std::move(result)); + } else { + // In this case we can avoid changing the grad tensor. There are three + // scenarios when we'll hit this case: + // + // 1. 
`variable_grad` is sparse, and `new_grad` is sparse. + // 2. `variable_grad` is dense, and `new_grad` is sparse. + // 3. `variable_grad` is dense, and `new_grad` is dense. + // 4. `variable_grad` is mkldnn, and `new_grad` is mkldnn. + // + // In all of these four cases, `variable_grad += new_grad` is a + // valid operation which adds `new_grad` to `variable_grad` in + // place. `variable_grad` is thus still referring to the same tensor + // after the operation. + // Also DistributedDataParallel(DDP) package relies on grad being + // mutated in place for saving peak memory usage. DDP will still + // work correctly if it is mutated out of place here, but DDP will + // maintain one extra copy of grad tensors in buffer and thus + // increase peak memory usage. + variable_grad += new_grad; + CHECK_RESULT(variable_grad, variable); + // ^ We could enforce the contract more aggressively here by writing: + // if (variable_grad.is_sparse() || new_grad.is_sparse()) { + // variable_grad += new_grad; + // } else if (obeys_layout_contract(variable_grad, variable)) { + // variable_grad += new_grad; + // } else { + // result = at::empty_strided(variable.sizes(), variable.strides(), + // variable.options().memory_format(c10::nullopt)); + // update_grad(at::native::add_out(result, variable_grad, + // new_grad, 1.0); + // } + // However, that accumulation is sometimes in place and sometimes not, + // which may break user code. + } + } else { + at::Tensor result; + if (variable_grad.is_sparse() && !new_grad.is_sparse()) { + // CPU backend throws an error on sparse + dense, so prefer dense + + // sparse here. + result = new_grad + variable_grad; + } else { + // Assumes operator+ result typically matches strides of first arg, + // and hopes variable_grad was originally created obeying layout + // contract. 
+ result = variable_grad + new_grad; + } + CHECK_RESULT(result, variable); + update_grad(std::move(result)); + // ^ We could enforce the contract more aggressively here by saying + // if (obeys_layout_contract(new_grad, variable)) { + // update_grad(new_grad + variable_grad); + // } else { + // update_grad(variable_grad + new_grad); + // } + // such that the stashed grad is likely to have the right strides if + // either variable_grad or new_grad already has the right strides. + // We could enforce the contract with certainty by saying + // auto result = variable_grad + new_grad (or vice versa), checking + // result's layout, and copying to an obedient clone if necessary before + // update_grad. The copy would require another gmem pass. We can't create + // empty result with the right layout then add_out into it with a single + // kernel, because GradMode is enabled in this branch, and add_out isn't + // differentiable. Maybe more trouble than it's worth. + } + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved( + const variable_list& inputs, + SwapSavedVariables& saved) override; + + Variable variable; +}; + +#undef CHECK_RESULT + +} // namespace autograd +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2248c7082d8cb6e963b14d68fc71e00432141e53 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/basic_ops.h @@ -0,0 +1,111 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace torch { +namespace autograd { + +struct TORCH_API Error : public Node { + Error(std::string msg, edge_list&& next_edges) + : Node(std::move(next_edges)), msg(std::move(msg)) {} + + Error(std::string msg) : 
msg(std::move(msg)) {} + + variable_list apply(variable_list&& inputs) override; + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved( + const variable_list& inputs, + SwapSavedVariables& saved) override; + + std::string msg; +}; + +// We print grad_fn names in tensor printing. For functions with backward +// NYI, grad_fn= will be printed if we use Error, which is confusing. So +// special case with a new NotImplemented function here. +struct TORCH_API NotImplemented : public Error { + NotImplemented(const std::string& forward_fn, edge_list&& next_edges) + : Error( + "derivative for " + forward_fn + " is not implemented", + std::move(next_edges)) {} + + NotImplemented(const std::string& forward_fn) + : Error("derivative for " + forward_fn + " is not implemented") {} +}; + +// Identity in forward, Error in backward. Used to implement +// @once_differentiable +struct TORCH_API DelayedError : public Node { + DelayedError(std::string msg, int64_t num_inputs) : msg(std::move(msg)) { + // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) + for (const auto i : c10::irange(num_inputs)) { + (void)i; // Suppress unused variable warning + add_input_metadata(Node::undefined_input()); + } + } + + variable_list apply(variable_list&& inputs) override; + + std::string msg; +}; + +struct TORCH_API UndefinedGrad : public Node { + UndefinedGrad() { + add_input_metadata(Node::undefined_input()); + } + + variable_list apply(variable_list&& inputs) override; +}; + +struct TORCH_API UndefinedGradBackward : public Node { + UndefinedGradBackward(edge_list&& next_edges) : Node(std::move(next_edges)) {} + + UndefinedGradBackward() = default; + + variable_list apply(variable_list&& inputs) override; + + void compiled_args(CompiledNodeArgs& args) override {} + variable_list apply_with_saved( + const variable_list& inputs, + SwapSavedVariables& saved) override { + return apply(variable_list(inputs)); + } +}; + +struct TORCH_API GraphRoot : public Node { + 
GraphRoot(edge_list functions, variable_list inputs) + : Node(std::move(functions)), outputs(std::move(inputs)) { + // Ensures calls to stream() on a GraphRoot instance reflect current + // stream(s) on devices of root grad tensors at the time the instance is + // constructed. + for (const auto& t : outputs) { + add_input_metadata(t); + } + } + + variable_list apply(variable_list&& inputs) override { + return outputs; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved( + const variable_list& inputs, + SwapSavedVariables& saved) override; + + variable_list outputs; +}; + +struct TORCH_API Identity : public Node { + variable_list apply(variable_list&& inputs) override; +}; + +} // namespace autograd +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h new file mode 100644 index 0000000000000000000000000000000000000000..9b1f0daf50bce3dc2d34cffc99274db916643a5f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/comm.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include + +#include +#include + +namespace torch { +namespace autograd { + +struct TORCH_CUDA_CU_API Scatter : public Node { + explicit Scatter( + std::vector devices, + c10::optional> chunk_sizes = c10::nullopt, + int64_t dim = 0, + c10::optional>> streams = + c10::nullopt, + bool unsqueeze_scalars = false); + ~Scatter() override; + + variable_list apply(variable_list&& inputs) override; + + std::vector devices_; + c10::optional> chunk_sizes_; + int64_t dim_; + c10::optional>> streams_; + bool unsqueeze_scalars_; +}; + +struct TORCH_CUDA_CU_API Gather : public Node { + explicit Gather(const at::Device& destination_device, int64_t dim = 0); + ~Gather() override; + + variable_list apply(variable_list&& inputs) 
override; + + at::Device destination_device_; + int64_t dim_; +}; + +} // namespace autograd +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h new file mode 100644 index 0000000000000000000000000000000000000000..94b3c9c679969b5bf3b6e9581fb06125d236c1fb --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/pybind.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace py = pybind11; + +namespace pybind11 { +namespace detail {} +} // namespace pybind11 diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..29f9259170f3471efaadeedc79f763a6345f6cd0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/tensor.h @@ -0,0 +1,186 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include + +#include +#include + +namespace torch { +namespace autograd { + +struct TORCH_API CopyBackwards : public Node { + variable_list apply(variable_list&& grads) override; + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved( + const variable_list& inputs, + SwapSavedVariables& saved) override; + + at::TensorOptions src_options; +}; + +// Note [View + Inplace update for base tensor] +// +// This note covers a few important topics related to view + inplace handling. +// - It explains what is the CopySlices Node and why we need it. +// - It explains the considerations on what is saved for backward in +// CopySlices. 
+// - It explains why we need to sometimes change the exec_info of the current +// backward +// +// What is CopySlices? +// ~~~~~~~~~~~~~~~~~~~ +// +// We support autograd with inplace mutation; e.g., if you write x.mul_(2) +// the autograd will work as if you now had multiple Tensors under the hood and +// you did +// x = t.clone() +// x0 = x +// x1 = x0 * 2 +// x = x1 +// As you can see here, after this operation, x.grad_fn now points to x1.grad_fn +// (the MulBackward node) and this node points to x's original grad_fn (which is +// also x0.grad_fn). It is important to keep in mind that after the inplace, +// there is no Tensor object that represents the x0 state anymore. But the graph +// for it is still around in autograd (in case x was used before being modified +// inplace). See Example 1 in +// https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE +// We call this rebasing the history of the Tensor. +// +// Now, a difficult situation is what happens if x is a differentiable view +// of a base b. +// b = t.clone() +// x = b.select(0, 0) +// x *= 2 +// With the same approach as above, this will become +// b = t.clone() +// x = b.select(0, 0) +// b0 = b +// x0 = x +// x1 = x0 * 2 +// b1 = b0.select_scatter(x1, 0, 0) +// x2 = b1.select(0, 0) +// x = x2 +// b = b1 +// As you can see here, not only we need to modify x's grad_fn, we also need to +// modify the one from b. We also need to ensure that the new grad_fn on x is +// linked to b's new grad_fn. The chain the select_scatter, multiplication and +// select is what CopySlices does, all wrapped into a single Node. +// +// See Example 1 in +// https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE +// +// What do we need to save in CopySlices to run backward? +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// We need to perform grad_view = fn(grad_view), but out-of-place. 
+// view_fn_ is an optional lambda function saved in DifferentiableViewMeta +// from forward pass, so that we can recover we when as_strided is not +// supported. It preserves the invariants: +// view = view_fn_(base) +// grad_view = view_fn_(grad_base) +// +// When as_strided is supported (e.g. strided CPU/CUDA Tensors), view_fn_ +// is empty and we save TensorGeometry(view) instead. +// With the TensorGeometry information we can use `as_strided` call which +// is more efficient to recover views in backward. +// +// For example: +// view_1 = view_op_1(base) +// view_2 = view_op_2(view_1) +// ... +// view_n = view_op_n(view_n-1) +// view_n = inplace_op(view_n) +// +// In CPU/CUDA case where we support efficient as_strided implementation, +// grad_view_n can be calculated through 1 step. +// +// grad_view_n = grad_base.as_strided(view_sizes, view_strides, view_offset); +// +// But in XLA backend where we don't have full support of as_strided, +// it has to save a chained lambda function view_fn_, to exactly +// replay how the view was done in forward. +// +// view_fn_ = view_op_n(...(view_op_2(view_op_1()))) +// grad_view_n = view_fn_(grad_base) +// +// This chain view_fn_ works as long as forward view ops are implemented, +// e.g XLA simulates view without a real Storage behind Tensor, but it's less +// efficient than the as_strided one so we should be careful to only use it when +// necessary. +// +// - For CPU/CUDA we save TensorGeometry of both base and view tensors, +// That's all we need to pass into as_strided. +// E.g. int[] sizes, int[] strides, and int storage_offset. +// - For XLA we use view_fn_, which captures all forward view op arguments +// by **value**. +// E.g for at::narrow, int dim, int start, in length are saved. +// +// Theoretically we could also save Tensor `view` in CopySlices Node, but +// it's far more expensive than what we currently save. +// 1. We cannot afford keeping large tensors alive to recover views only. +// 2. 
There are inplace checks when Tensors are loaded back to make sure +// they haven't been changed (including size metadata). +// So saving metadata like TensorGeometry/view arguments is much better +// because it is minimal information needed to recover views, as well as it +// allows the user to modify the original Tensor without preventing the +// backward pass from running. +// +// Why do we manually change exec_info in the apply? +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// Using the same example as before, +// b = t.clone() +// x = b.select(0, 0) +// x *= y +// +// You can see the visualization at +// https://docs.google.com/drawings/d/1Bx-Hcz-zlIv7PabQqnPhUIVIs9F8WWi48svqMsAUMFs +// which contains the wrapped MulBackward Node and show what it links to. +// Since a backward can happen between any subset of the inputs (t and y) and +// outputs (o, x, b). It is possible to get into a state where CopySlices's 0th +// next function (CloneBackward) needs gradient but MulBackward's 0th next +// function (SelectBackward) is not. This happens if you do autograd.grad +// between x and t for example. +// In such a case, we do need to mark SelectBackward as requiring gradient such +// that, during the execution of MulBackward, we will actually compute gradient +// for the 0th input. +// +// All the other next functions are always shared (this is asserted in the apply +// code) and so nothing needs to be done for them. + +// See Note [View + Inplace update for view tensor] for what we do to view +// tensor when an in-place operation happens. 
+struct TORCH_API CopySlices : public Node { + CopySlices( + const Variable& base_var, + at::TensorGeometry view_, + std::function view_fn_, + std::shared_ptr fn_); + + // common code between apply/apply_with_saved + template + variable_list apply_impl(variable_list&& inputs, const T& call_fn); + + variable_list apply(variable_list&& inputs) override; + void release_variables() override; + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved( + const variable_list& inputs, + SwapSavedVariables& saved) override; + + at::TensorGeometry base; + // view and view_fn are redundant and view_fn will be used if available. + // See Note [View + Inplace update for base tensor] for details. + at::TensorGeometry view; + std::function view_fn; + std::shared_ptr fn; +}; + +} // namespace autograd +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..f30ce082a70d781ee551679f9ad23bd9010d371e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/functions/utils.h @@ -0,0 +1,122 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +namespace torch { +namespace autograd { + +using function_constructor = std::function(edge_list&&)>; + +/** + * Wraps the tensor outputs in variables and creates the grad_fn and sets the + * grad_fn if necessary. + */ +TORCH_API variable_list wrap_outputs( + const variable_list& inputs, + tensor_list&& outputs, + const function_constructor& ctr); + +/// Checks that inputs contains exactly `args` items and that the first +/// `required_args` +/// items are not nullptr. If not specified, `required_args` defaults to `args`. 
+TORCH_API void check_input_variables( + const char* name, + const variable_list& inputs, + int args, + int required_args = -1, + bool allow_undefined = false); + +struct ComputeRequiresGrad : IterArgs { + bool out = false; + using IterArgs::operator(); + void operator()(const at::Tensor& tensor) { + const auto& var = static_cast(tensor); + if (var.defined() && var.requires_grad()) { + out = true; + } + } + void operator()(const c10::optional& tensor) { + if (tensor.has_value()) { + (*this)(*tensor); + } + } + bool short_circuit() { + return out; + } +}; + +template +inline bool compute_requires_grad(Args&&... args) { + if (!GradMode::is_enabled()) { + return false; + } + return ComputeRequiresGrad().apply(std::forward(args)...).out; +} + +inline void set_history( + at::Tensor& variable, + const std::shared_ptr& grad_fn) { + TORCH_CHECK(grad_fn != nullptr); + if (variable.defined()) { + // If the codegen triggers this, you most likely want to add your newly + // added function to the DONT_REQUIRE_DERIVATIVE list in + // tools/autograd/gen_variable_type.py + TORCH_INTERNAL_ASSERT(isDifferentiableType(variable.scalar_type())); + auto output_nr = grad_fn->add_input_metadata(variable); + impl::set_gradient_edge(variable, {grad_fn, output_nr}); + } else { + grad_fn->add_input_metadata(Node::undefined_input()); + } +} + +inline void set_history( + std::vector&& variables, + const std::shared_ptr& grad_fn) { + for (auto& variable : variables) { + set_history(variable, grad_fn); + } +} + +inline void set_history( + std::vector& variables, + const std::shared_ptr& grad_fn) { + for (auto& variable : variables) { + set_history(variable, grad_fn); + } +} + +inline bool isFwGradDefined(const c10::optional& t) { + return t.has_value() && t->defined() && t->_fw_grad(/*level */ 0).defined(); +} + +inline bool isFwGradDefinedTensorList(const at::ITensorListRef& variables) { + bool ret = false; + for (auto& variable : variables) { + ret |= isFwGradDefined(variable); + } + return 
ret; +} + +inline bool isFwGradDefinedTensorList( + const c10::List>& li) { + bool ret = false; + for (auto i : c10::irange(li.size())) { + auto t = li.get(i); + ret |= (t.has_value() && isFwGradDefined(t.value())); + } + return ret; +} + +} // namespace autograd +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h new file mode 100644 index 0000000000000000000000000000000000000000..5808de421917db06202e6cd64bad56a3e3697676 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h @@ -0,0 +1,14640 @@ +#pragma once + +// @generated from ../tools/autograd/templates/Functions.h + +#include +#include +#include + +#include "torch/csrc/autograd/function.h" +#include "torch/csrc/autograd/variable.h" +#include "torch/csrc/autograd/saved_variable.h" +#include + +#include + +namespace torch { namespace autograd { namespace generated { + +using at::Scalar; +using at::Tensor; +using at::IntArrayRef; +using at::ArrayRef; +using at::Type; +using at::TensorGeometry; +using at::ScalarType; +using c10::optional; +using c10::fmap; + +inline std::vector unpack_list(at::ArrayRef xs, std::shared_ptr saved_for = nullptr) { + // NB: we must explicitly do the conversion in the lambda, otherwise template + // deduction will give a Tensor of Variable which is not convertible + return fmap(xs, [&saved_for](const SavedVariable& x) { + // TODO(crcrpar): Use `std::move(saved_for)` to avoid incrementing refcount, which would need refactoring. + return static_cast(x.unpack(saved_for)); + }); +} + +inline c10::List> unpack_opt_list(at::ArrayRef xs, std::shared_ptr saved_for = nullptr) { + torch::List> result; + result.reserve(xs.size()); + for (const SavedVariable& v : xs) { + auto var = v.unpack(saved_for); + result.push_back(var.defined() ? 
c10::optional(var) : c10::nullopt); + } + return result; +} + +using torch::autograd::TypeAndSize; + +#ifdef _WIN32 +struct AbsBackward0 : public TraceableFunction { + TORCH_API AbsBackward0() = default; +#else +struct TORCH_API AbsBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AbsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AcosBackward0 : public TraceableFunction { + TORCH_API AcosBackward0() = default; +#else +struct TORCH_API AcosBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AcosBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AddBackward0 : public TraceableFunction { + TORCH_API AddBackward0() = default; +#else +struct TORCH_API AddBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::ScalarType other_scalar_type; + at::ScalarType self_scalar_type; + +}; +#ifdef 
_WIN32 +struct AddBackward1 : public TraceableFunction { + TORCH_API AddBackward1() = default; +#else +struct TORCH_API AddBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct AddbmmBackward0 : public TraceableFunction { + TORCH_API AddbmmBackward0() = default; +#else +struct TORCH_API AddbmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddbmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + batch1_.reset_data(); + batch2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable batch1_; + c10::SymInt batch1_sym_argsize_0; + c10::SymInt batch1_sym_argsize_1; + SavedVariable batch2_; + c10::SymInt batch2_sym_argsize_2; + at::Scalar beta; + +}; +#ifdef _WIN32 +struct AddcdivBackward0 : public TraceableFunction { + TORCH_API AddcdivBackward0() = default; +#else +struct TORCH_API AddcdivBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddcdivBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + tensor1_.reset_data(); + tensor2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + at::ScalarType self_scalar_type; + SavedVariable tensor1_; + at::ScalarType tensor1_scalar_type; + SavedVariable tensor2_; + at::ScalarType tensor2_scalar_type; + at::Scalar value; + +}; +#ifdef _WIN32 +struct AddcmulBackward0 : public TraceableFunction { + TORCH_API AddcmulBackward0() = default; +#else +struct TORCH_API AddcmulBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddcmulBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + tensor1_.reset_data(); + tensor2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::ScalarType self_scalar_type; + SavedVariable tensor1_; + at::ScalarType tensor1_scalar_type; + SavedVariable tensor2_; + at::ScalarType tensor2_scalar_type; + at::Scalar value; + +}; +#ifdef _WIN32 +struct AddmmBackward0 : public TraceableFunction { + TORCH_API AddmmBackward0() = default; +#else +struct TORCH_API AddmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mat1_.reset_data(); + mat2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar beta; + SavedVariable mat1_; + at::Layout mat1_layout; + std::vector mat1_sym_sizes; + std::vector mat1_sym_strides; + SavedVariable mat2_; + at::Layout mat2_layout; + std::vector mat2_sym_sizes; + std::vector mat2_sym_strides; + +}; +#ifdef _WIN32 +struct 
SparseAddmmBackward0 : public TraceableFunction { + TORCH_API SparseAddmmBackward0() = default; +#else +struct TORCH_API SparseAddmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseAddmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mat1_.reset_data(); + mat2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar beta; + SavedVariable mat1_; + SavedVariable mat2_; + at::Layout mat2_layout; + std::vector mat2_sym_sizes; + std::vector mat2_sym_strides; + +}; +#ifdef _WIN32 +struct AddmvBackward0 : public TraceableFunction { + TORCH_API AddmvBackward0() = default; +#else +struct TORCH_API AddmvBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddmvBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mat_.reset_data(); + vec_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar beta; + SavedVariable mat_; + SavedVariable vec_; + +}; +#ifdef _WIN32 +struct AddrBackward0 : public TraceableFunction { + TORCH_API AddrBackward0() = default; +#else +struct TORCH_API AddrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AddrBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + vec1_.reset_data(); + vec2_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar beta; + SavedVariable vec1_; + SavedVariable vec2_; + +}; +#ifdef _WIN32 +struct AffineGridGeneratorBackward0 : public TraceableFunction { + TORCH_API AffineGridGeneratorBackward0() = default; +#else +struct TORCH_API AffineGridGeneratorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AffineGridGeneratorBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector size; + +}; +#ifdef _WIN32 +struct AliasBackward0 : public Node { + TORCH_API AliasBackward0() = default; +#else +struct TORCH_API AliasBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AliasBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct AngleBackward0 : public TraceableFunction { + TORCH_API AngleBackward0() = default; +#else +struct TORCH_API AngleBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AngleBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) 
override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AcoshBackward0 : public TraceableFunction { + TORCH_API AcoshBackward0() = default; +#else +struct TORCH_API AcoshBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AcoshBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AcoshBackward1 : public TraceableFunction { + TORCH_API AcoshBackward1() = default; +#else +struct TORCH_API AcoshBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AcoshBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct AsinhBackward0 : public TraceableFunction { + TORCH_API AsinhBackward0() = default; +#else +struct TORCH_API AsinhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsinhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AsinhBackward1 : public TraceableFunction { + TORCH_API AsinhBackward1() = default; +#else +struct TORCH_API 
AsinhBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsinhBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct AtanhBackward0 : public TraceableFunction { + TORCH_API AtanhBackward0() = default; +#else +struct TORCH_API AtanhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AtanhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AtanhBackward1 : public TraceableFunction { + TORCH_API AtanhBackward1() = default; +#else +struct TORCH_API AtanhBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AtanhBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct AsStridedBackward0 : public Node { + TORCH_API AsStridedBackward0() = default; +#else +struct TORCH_API AsStridedBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsStridedBackward0"; } + void release_variables() override { + + + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::TensorGeometry self_geometry; + std::vector size; + c10::optional storage_offset; + std::vector stride; + +}; +#ifdef _WIN32 +struct AsStridedBackward1 : public TraceableFunction { + TORCH_API AsStridedBackward1() = default; +#else +struct TORCH_API AsStridedBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsStridedBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::TensorGeometry self_geometry; + std::vector size; + c10::optional storage_offset; + std::vector stride; + +}; +#ifdef _WIN32 +struct AsinBackward0 : public TraceableFunction { + TORCH_API AsinBackward0() = default; +#else +struct TORCH_API AsinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AtanBackward0 : public TraceableFunction { + TORCH_API AtanBackward0() = default; +#else +struct TORCH_API AtanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AtanBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + 
self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct Atan2Backward0 : public TraceableFunction { + TORCH_API Atan2Backward0() = default; +#else +struct TORCH_API Atan2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Atan2Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct BaddbmmBackward0 : public TraceableFunction { + TORCH_API BaddbmmBackward0() = default; +#else +struct TORCH_API BaddbmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BaddbmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + batch1_.reset_data(); + batch2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable batch1_; + SavedVariable batch2_; + at::Scalar beta; + +}; +#ifdef _WIN32 +struct BernoulliBackward0 : public TraceableFunction { + TORCH_API BernoulliBackward0() = default; +#else +struct TORCH_API BernoulliBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BernoulliBackward0"; } + void 
release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct BernoulliBackward1 : public TraceableFunction { + TORCH_API BernoulliBackward1() = default; +#else +struct TORCH_API BernoulliBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BernoulliBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize p_info; + +}; +#ifdef _WIN32 +struct BernoulliBackward2 : public TraceableFunction { + TORCH_API BernoulliBackward2() = default; +#else +struct TORCH_API BernoulliBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BernoulliBackward2"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct BmmBackward0 : public TraceableFunction { + TORCH_API BmmBackward0() = default; +#else +struct TORCH_API BmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mat2_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + SavedVariable mat2_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MatmulBackward0 : public TraceableFunction { + TORCH_API MatmulBackward0() = default; +#else +struct TORCH_API MatmulBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MatmulBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct CatBackward0 : public TraceableFunction { + TORCH_API CatBackward0() = default; +#else +struct TORCH_API CatBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CatBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + ::std::vector tensors_args_scalartypes; + ::std::vector<::std::vector> tensors_args_sizes_symint; + size_t tensors_size_; +}; +#ifdef _WIN32 +struct CauchyBackward0 : public TraceableFunction { + TORCH_API CauchyBackward0() = default; +#else +struct TORCH_API CauchyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CauchyBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct CeilBackward0 : public TraceableFunction { + TORCH_API CeilBackward0() = default; +#else +struct TORCH_API CeilBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CeilBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct CholeskyBackward0 : public TraceableFunction { + TORCH_API CholeskyBackward0() = default; +#else +struct TORCH_API CholeskyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CholeskyBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool upper; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LinalgCholeskyExBackward0 : public TraceableFunction { + TORCH_API LinalgCholeskyExBackward0() = default; +#else +struct TORCH_API LinalgCholeskyExBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgCholeskyExBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + L_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool upper; + SavedVariable L_; + +}; +#ifdef _WIN32 +struct CholeskySolveBackward0 : public 
TraceableFunction { + TORCH_API CholeskySolveBackward0() = default; +#else +struct TORCH_API CholeskySolveBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CholeskySolveBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input2_.reset_data(); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable input2_; + SavedVariable self_; + bool upper; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CholeskyInverseBackward0 : public TraceableFunction { + TORCH_API CholeskyInverseBackward0() = default; +#else +struct TORCH_API CholeskyInverseBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CholeskyInverseBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + bool upper; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ClampBackward0 : public TraceableFunction { + TORCH_API ClampBackward0() = default; +#else +struct TORCH_API ClampBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + max_.reset_data(); + min_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; 
+ variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable max_; + SavedVariable min_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ClampBackward1 : public TraceableFunction { + TORCH_API ClampBackward1() = default; +#else +struct TORCH_API ClampBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional max; + c10::optional min; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ClampMinBackward0 : public TraceableFunction { + TORCH_API ClampMinBackward0() = default; +#else +struct TORCH_API ClampMinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampMinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar min; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ClampMinBackward1 : public TraceableFunction { + TORCH_API ClampMinBackward1() = default; +#else +struct TORCH_API ClampMinBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampMinBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + min_.reset_data(); + self_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable min_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ClampMaxBackward0 : public TraceableFunction { + TORCH_API ClampMaxBackward0() = default; +#else +struct TORCH_API ClampMaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampMaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar max; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ClampMaxBackward1 : public TraceableFunction { + TORCH_API ClampMaxBackward1() = default; +#else +struct TORCH_API ClampMaxBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ClampMaxBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + max_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable max_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct CloneBackward0 : public TraceableFunction { + TORCH_API CloneBackward0() = default; +#else +struct TORCH_API CloneBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CloneBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + 
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ToCopyBackward0 : public TraceableFunction { + TORCH_API ToCopyBackward0() = default; +#else +struct TORCH_API ToCopyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToCopyBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::TensorOptions self_options; + +}; +#ifdef _WIN32 +struct CoalesceBackward0 : public TraceableFunction { + TORCH_API CoalesceBackward0() = default; +#else +struct TORCH_API CoalesceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CoalesceBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ComplexBackward0 : public TraceableFunction { + TORCH_API ComplexBackward0() = default; +#else +struct TORCH_API ComplexBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ComplexBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + imag_.reset_data(); + real_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable imag_; + SavedVariable real_; + +}; +#ifdef _WIN32 +struct PolarBackward0 : 
public TraceableFunction { + TORCH_API PolarBackward0() = default; +#else +struct TORCH_API PolarBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PolarBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ConjBackward0 : public Node { + TORCH_API ConjBackward0() = default; +#else +struct TORCH_API ConjBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConjBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NegViewBackward0 : public Node { + TORCH_API NegViewBackward0() = default; +#else +struct TORCH_API NegViewBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NegViewBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ConjPhysicalBackward0 : public TraceableFunction { + TORCH_API ConjPhysicalBackward0() = default; +#else +struct TORCH_API ConjPhysicalBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConjPhysicalBackward0"; } + void 
release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ConjPhysicalBackward1 : public TraceableFunction { + TORCH_API ConjPhysicalBackward1() = default; +#else +struct TORCH_API ConjPhysicalBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConjPhysicalBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct CopysignBackward0 : public TraceableFunction { + TORCH_API CopysignBackward0() = default; +#else +struct TORCH_API CopysignBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CopysignBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CopysignBackward1 : public TraceableFunction { + TORCH_API CopysignBackward1() = default; +#else +struct TORCH_API CopysignBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CopysignBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + 
result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CosBackward0 : public TraceableFunction { + TORCH_API CosBackward0() = default; +#else +struct TORCH_API CosBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CosBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct CoshBackward0 : public TraceableFunction { + TORCH_API CoshBackward0() = default; +#else +struct TORCH_API CoshBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CoshBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LinalgCrossBackward0 : public TraceableFunction { + TORCH_API LinalgCrossBackward0() = default; +#else +struct TORCH_API LinalgCrossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgCrossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LogcumsumexpBackward0 : public TraceableFunction { + TORCH_API LogcumsumexpBackward0() = default; +#else +struct TORCH_API LogcumsumexpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogcumsumexpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CumprodBackward0 : public TraceableFunction { + TORCH_API CumprodBackward0() = default; +#else +struct TORCH_API CumprodBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CumprodBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + at::ScalarType self_scalar_type; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CumsumBackward0 : public TraceableFunction { + TORCH_API CumsumBackward0() = default; +#else +struct TORCH_API CumsumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() 
const override { return "CumsumBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct CummaxBackward0 : public TraceableFunction { + TORCH_API CummaxBackward0() = default; +#else +struct TORCH_API CummaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CummaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct CumminBackward0 : public TraceableFunction { + TORCH_API CumminBackward0() = default; +#else +struct TORCH_API CumminBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CumminBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct ConvTbcBackward0 : public TraceableFunction { + TORCH_API ConvTbcBackward0() = default; +#else +struct TORCH_API ConvTbcBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) 
override; + std::string name() const override { return "ConvTbcBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + bias_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable bias_; + int64_t pad = 0; + SavedVariable self_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct CtcLossBackward0 : public TraceableFunction { + TORCH_API CtcLossBackward0() = default; +#else +struct TORCH_API CtcLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CtcLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + log_probs_.reset_data(); + targets_.reset_data(); + result0_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t blank = 0; + std::vector input_lengths; + SavedVariable log_probs_; + std::vector target_lengths; + SavedVariable targets_; + bool zero_infinity; + SavedVariable result0_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct CtcLossBackward1 : public TraceableFunction { + TORCH_API CtcLossBackward1() = default; +#else +struct TORCH_API CtcLossBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CtcLossBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_lengths_.reset_data(); + log_probs_.reset_data(); + target_lengths_.reset_data(); + targets_.reset_data(); + result0_.reset_data(); + result1_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t blank = 0; + SavedVariable input_lengths_; + SavedVariable log_probs_; + SavedVariable target_lengths_; + SavedVariable targets_; + bool zero_infinity; + SavedVariable result0_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct Deg2RadBackward0 : public TraceableFunction { + TORCH_API Deg2RadBackward0() = default; +#else +struct TORCH_API Deg2RadBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Deg2RadBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct LinalgDetBackward0 : public TraceableFunction { + TORCH_API LinalgDetBackward0() = default; +#else +struct TORCH_API LinalgDetBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgDetBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + A_.reset_data(); + LU_.reset_data(); + pivots_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable A_; + SavedVariable LU_; + SavedVariable pivots_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LinalgSlogdetBackward0 : public TraceableFunction { + TORCH_API LinalgSlogdetBackward0() = default; +#else +struct TORCH_API LinalgSlogdetBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "LinalgSlogdetBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + A_.reset_data(); + LU_.reset_data(); + pivots_.reset_data(); + sign_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable A_; + SavedVariable LU_; + SavedVariable pivots_; + SavedVariable sign_; + +}; +#ifdef _WIN32 +struct BlockDiagBackward0 : public TraceableFunction { + TORCH_API BlockDiagBackward0() = default; +#else +struct TORCH_API BlockDiagBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BlockDiagBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + ::std::vector tensors_args_scalartypes; + ::std::vector<::std::vector> tensors_args_sizes; + size_t tensors_size_; +}; +#ifdef _WIN32 +struct DiagEmbedBackward0 : public TraceableFunction { + TORCH_API DiagEmbedBackward0() = default; +#else +struct TORCH_API DiagEmbedBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DiagEmbedBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim1 = 0; + int64_t dim2 = 0; + int64_t offset = 0; + +}; +#ifdef _WIN32 +struct DiagonalBackward0 : public Node { + TORCH_API DiagonalBackward0() = default; +#else +struct TORCH_API DiagonalBackward0 : public 
Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DiagonalBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim1 = 0; + int64_t dim2 = 0; + int64_t offset = 0; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct DiagonalBackwardBackward0 : public TraceableFunction { + TORCH_API DiagonalBackwardBackward0() = default; +#else +struct TORCH_API DiagonalBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DiagonalBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim1 = 0; + int64_t dim2 = 0; + int64_t offset = 0; + +}; +#ifdef _WIN32 +struct DistBackward0 : public TraceableFunction { + TORCH_API DistBackward0() = default; +#else +struct TORCH_API DistBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DistBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + at::Scalar p; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct DivBackward0 : public TraceableFunction { + TORCH_API DivBackward0() = default; +#else +struct TORCH_API 
DivBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DivBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct DivBackward1 : public TraceableFunction { + TORCH_API DivBackward1() = default; +#else +struct TORCH_API DivBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DivBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar other; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct DivBackward2 : public TraceableFunction { + TORCH_API DivBackward2() = default; +#else +struct TORCH_API DivBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DivBackward2"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + c10::optional rounding_mode; + SavedVariable self_; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct DivBackward3 : public TraceableFunction { + TORCH_API 
DivBackward3() = default; +#else +struct TORCH_API DivBackward3 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DivBackward3"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar other; + c10::optional rounding_mode; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct DotBackward0 : public TraceableFunction { + TORCH_API DotBackward0() = default; +#else +struct TORCH_API DotBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DotBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + tensor_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable tensor_; + +}; +#ifdef _WIN32 +struct VdotBackward0 : public TraceableFunction { + TORCH_API VdotBackward0() = default; +#else +struct TORCH_API VdotBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "VdotBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct FusedDropoutBackward0 : public TraceableFunction { + TORCH_API 
FusedDropoutBackward0() = default; +#else +struct TORCH_API FusedDropoutBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FusedDropoutBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double p; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct NativeDropoutBackward0 : public TraceableFunction { + TORCH_API NativeDropoutBackward0() = default; +#else +struct TORCH_API NativeDropoutBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeDropoutBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double p; + c10::optional train; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct NativeDropoutBackwardBackward0 : public TraceableFunction { + TORCH_API NativeDropoutBackwardBackward0() = default; +#else +struct TORCH_API NativeDropoutBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeDropoutBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + 
SavedVariable grad_output_; + SavedVariable mask_; + double scale; + +}; +#ifdef _WIN32 +struct EqBackward0 : public TraceableFunction { + TORCH_API EqBackward0() = default; +#else +struct TORCH_API EqBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EqBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct EqBackward1 : public TraceableFunction { + TORCH_API EqBackward1() = default; +#else +struct TORCH_API EqBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EqBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct ErfBackward0 : public TraceableFunction { + TORCH_API ErfBackward0() = default; +#else +struct TORCH_API ErfBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ErfBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ErfcBackward0 : public 
TraceableFunction { + TORCH_API ErfcBackward0() = default; +#else +struct TORCH_API ErfcBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ErfcBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialErfcxBackward0 : public TraceableFunction { + TORCH_API SpecialErfcxBackward0() = default; +#else +struct TORCH_API SpecialErfcxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialErfcxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ErfinvBackward0 : public TraceableFunction { + TORCH_API ErfinvBackward0() = default; +#else +struct TORCH_API ErfinvBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ErfinvBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ExpBackward0 : public TraceableFunction { + 
TORCH_API ExpBackward0() = default; +#else +struct TORCH_API ExpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ExpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct Exp2Backward0 : public TraceableFunction { + TORCH_API Exp2Backward0() = default; +#else +struct TORCH_API Exp2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Exp2Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct Expm1Backward0 : public TraceableFunction { + TORCH_API Expm1Backward0() = default; +#else +struct TORCH_API Expm1Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Expm1Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ExpandBackward0 : public Node { + TORCH_API ExpandBackward0() = default; +#else +struct TORCH_API ExpandBackward0 : public Node { +#endif + using 
Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ExpandBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct ExponentialBackward0 : public TraceableFunction { + TORCH_API ExponentialBackward0() = default; +#else +struct TORCH_API ExponentialBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ExponentialBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FakeQuantizePerTensorAffineCachemaskBackward0 : public TraceableFunction { + TORCH_API FakeQuantizePerTensorAffineCachemaskBackward0() = default; +#else +struct TORCH_API FakeQuantizePerTensorAffineCachemaskBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FakeQuantizePerTensorAffineCachemaskBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0 : public TraceableFunction { + TORCH_API FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0() = default; +#else +struct TORCH_API FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0 : public 
TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct FakeQuantizeLearnablePerTensorAffineBackward0 : public TraceableFunction { + TORCH_API FakeQuantizeLearnablePerTensorAffineBackward0() = default; +#else +struct TORCH_API FakeQuantizeLearnablePerTensorAffineBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FakeQuantizeLearnablePerTensorAffineBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scale_.reset_data(); + self_.reset_data(); + zero_point_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double grad_factor; + int64_t quant_max = 0; + int64_t quant_min = 0; + SavedVariable scale_; + SavedVariable self_; + SavedVariable zero_point_; + +}; +#ifdef _WIN32 +struct FakeQuantizePerChannelAffineCachemaskBackward0 : public TraceableFunction { + TORCH_API FakeQuantizePerChannelAffineCachemaskBackward0() = default; +#else +struct TORCH_API FakeQuantizePerChannelAffineCachemaskBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FakeQuantizePerChannelAffineCachemaskBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + 
mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct FakeQuantizeLearnablePerChannelAffineBackward0 : public TraceableFunction { + TORCH_API FakeQuantizeLearnablePerChannelAffineBackward0() = default; +#else +struct TORCH_API FakeQuantizeLearnablePerChannelAffineBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FakeQuantizeLearnablePerChannelAffineBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scale_.reset_data(); + self_.reset_data(); + zero_point_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t axis = 0; + double grad_factor; + int64_t quant_max = 0; + int64_t quant_min = 0; + SavedVariable scale_; + SavedVariable self_; + SavedVariable zero_point_; + +}; +#ifdef _WIN32 +struct FusedMovingAvgObsFqHelperBackward0 : public TraceableFunction { + TORCH_API FusedMovingAvgObsFqHelperBackward0() = default; +#else +struct TORCH_API FusedMovingAvgObsFqHelperBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FusedMovingAvgObsFqHelperBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct FillBackward0 : public TraceableFunction { + TORCH_API FillBackward0() = default; +#else +struct 
TORCH_API FillBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FillBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FillBackward1 : public TraceableFunction { + TORCH_API FillBackward1() = default; +#else +struct TORCH_API FillBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FillBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FillBackward2 : public TraceableFunction { + TORCH_API FillBackward2() = default; +#else +struct TORCH_API FillBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FillBackward2"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FillBackward3 : public TraceableFunction { + TORCH_API FillBackward3() = default; +#else +struct TORCH_API FillBackward3 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FillBackward3"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + 
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FloorBackward0 : public TraceableFunction { + TORCH_API FloorBackward0() = default; +#else +struct TORCH_API FloorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FloorBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FmodBackward0 : public TraceableFunction { + TORCH_API FmodBackward0() = default; +#else +struct TORCH_API FmodBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FmodBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FmodBackward1 : public TraceableFunction { + TORCH_API FmodBackward1() = default; +#else +struct TORCH_API FmodBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FmodBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct FracBackward0 : public TraceableFunction { + TORCH_API FracBackward0() = default; 
+#else +struct TORCH_API FracBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FracBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FrexpBackward0 : public TraceableFunction { + TORCH_API FrexpBackward0() = default; +#else +struct TORCH_API FrexpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FrexpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable exponent_; + +}; +#ifdef _WIN32 +struct GatherBackward0 : public TraceableFunction { + TORCH_API GatherBackward0() = default; +#else +struct TORCH_API GatherBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GatherBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + SavedVariable self_; + bool sparse_grad; + +}; +#ifdef _WIN32 +struct GeBackward0 : public TraceableFunction { + TORCH_API GeBackward0() = default; +#else +struct TORCH_API GeBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct GeBackward1 : public TraceableFunction { + TORCH_API GeBackward1() = default; +#else +struct TORCH_API GeBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct GeometricBackward0 : public TraceableFunction { + TORCH_API GeometricBackward0() = default; +#else +struct TORCH_API GeometricBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GeometricBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct GeqrfBackward0 : public TraceableFunction { + TORCH_API GeqrfBackward0() = default; +#else +struct TORCH_API GeqrfBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GeqrfBackward0"; } + 
void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct GridSampler2DBackward0 : public TraceableFunction { + TORCH_API GridSampler2DBackward0() = default; +#else +struct TORCH_API GridSampler2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GridSampler2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grid_.reset_data(); + input_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + SavedVariable grid_; + SavedVariable input_; + int64_t interpolation_mode = 0; + int64_t padding_mode = 0; + +}; +#ifdef _WIN32 +struct GridSampler3DBackward0 : public TraceableFunction { + TORCH_API GridSampler3DBackward0() = default; +#else +struct TORCH_API GridSampler3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GridSampler3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grid_.reset_data(); + input_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + SavedVariable grid_; + SavedVariable input_; + int64_t interpolation_mode = 0; + int64_t padding_mode = 0; + +}; +#ifdef _WIN32 +struct GridSampler2DCpuFallbackBackward0 : public TraceableFunction { + TORCH_API GridSampler2DCpuFallbackBackward0() = default; +#else +struct TORCH_API GridSampler2DCpuFallbackBackward0 
: public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GridSampler2DCpuFallbackBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grid_.reset_data(); + input_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + SavedVariable grid_; + SavedVariable input_; + int64_t interpolation_mode = 0; + int64_t padding_mode = 0; + +}; +#ifdef _WIN32 +struct GtBackward0 : public TraceableFunction { + TORCH_API GtBackward0() = default; +#else +struct TORCH_API GtBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GtBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct GtBackward1 : public TraceableFunction { + TORCH_API GtBackward1() = default; +#else +struct TORCH_API GtBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GtBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct HardsigmoidBackward0 : public TraceableFunction { + TORCH_API HardsigmoidBackward0() = 
default; +#else +struct TORCH_API HardsigmoidBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardsigmoidBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardswishBackward0 : public TraceableFunction { + TORCH_API HardswishBackward0() = default; +#else +struct TORCH_API HardswishBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardswishBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardswishBackwardBackward0 : public TraceableFunction { + TORCH_API HardswishBackwardBackward0() = default; +#else +struct TORCH_API HardswishBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardswishBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + SavedVariable self_; + at::TensorOptions self_options; + +}; +#ifdef _WIN32 +struct 
HypotBackward0 : public TraceableFunction { + TORCH_API HypotBackward0() = default; +#else +struct TORCH_API HypotBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HypotBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct I0Backward0 : public TraceableFunction { + TORCH_API I0Backward0() = default; +#else +struct TORCH_API I0Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "I0Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialI0EBackward0 : public TraceableFunction { + TORCH_API SpecialI0EBackward0() = default; +#else +struct TORCH_API SpecialI0EBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialI0EBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + 
SavedVariable result_; + +}; +#ifdef _WIN32 +struct SpecialI1Backward0 : public TraceableFunction { + TORCH_API SpecialI1Backward0() = default; +#else +struct TORCH_API SpecialI1Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialI1Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SpecialI1EBackward0 : public TraceableFunction { + TORCH_API SpecialI1EBackward0() = default; +#else +struct TORCH_API SpecialI1EBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialI1EBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct IgammaBackward0 : public TraceableFunction { + TORCH_API IgammaBackward0() = default; +#else +struct TORCH_API IgammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IgammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct IgammacBackward0 : public TraceableFunction { + TORCH_API IgammacBackward0() = default; +#else +struct TORCH_API IgammacBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IgammacBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct IndexBackward0 : public TraceableFunction { + TORCH_API IndexBackward0() = default; +#else +struct TORCH_API IndexBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.clear(); + indices_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector indices_; + bool indices_released_ = false; + at::TensorOptions self_options; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UnsafeIndexBackward0 : public TraceableFunction { + TORCH_API UnsafeIndexBackward0() = default; +#else +struct TORCH_API UnsafeIndexBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsafeIndexBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); 
+ indices_.clear(); + indices_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector indices_; + bool indices_released_ = false; + at::TensorOptions self_options; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct IndexAddBackward0 : public TraceableFunction { + TORCH_API IndexAddBackward0() = default; +#else +struct TORCH_API IndexAddBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexAddBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + source_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + int64_t dim = 0; + SavedVariable index_; + SavedVariable source_; + int64_t source_dim = 0; + +}; +#ifdef _WIN32 +struct IndexReduceBackward0 : public TraceableFunction { + TORCH_API IndexReduceBackward0() = default; +#else +struct TORCH_API IndexReduceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexReduceBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + self_.reset_data(); + source_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool include_self; + SavedVariable index_; + std::string reduce; + SavedVariable self_; + SavedVariable source_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct IndexCopyBackward0 
: public TraceableFunction { + TORCH_API IndexCopyBackward0() = default; +#else +struct TORCH_API IndexCopyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexCopyBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + source_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + SavedVariable source_; + int64_t source_dim = 0; + +}; +#ifdef _WIN32 +struct IndexFillBackward0 : public TraceableFunction { + TORCH_API IndexFillBackward0() = default; +#else +struct TORCH_API IndexFillBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexFillBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + +}; +#ifdef _WIN32 +struct IndexFillBackward1 : public TraceableFunction { + TORCH_API IndexFillBackward1() = default; +#else +struct TORCH_API IndexFillBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexFillBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t 
dim = 0; + SavedVariable index_; + +}; +#ifdef _WIN32 +struct IndexPutBackward0 : public TraceableFunction { + TORCH_API IndexPutBackward0() = default; +#else +struct TORCH_API IndexPutBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexPutBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.clear(); + indices_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool accumulate; + std::vector indices_; + bool indices_released_ = false; + torch::autograd::generated::TypeAndSize values_info; + +}; +#ifdef _WIN32 +struct UnsafeIndexPutBackward0 : public TraceableFunction { + TORCH_API UnsafeIndexPutBackward0() = default; +#else +struct TORCH_API UnsafeIndexPutBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsafeIndexPutBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.clear(); + indices_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool accumulate; + std::vector indices_; + bool indices_released_ = false; + torch::autograd::generated::TypeAndSize values_info; + +}; +#ifdef _WIN32 +struct IndexPutImplBackward0 : public TraceableFunction { + TORCH_API IndexPutImplBackward0() = default; +#else +struct TORCH_API IndexPutImplBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexPutImplBackward0"; } + 
void release_variables() override { + std::lock_guard lock(mutex_); + indices_.clear(); + indices_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool accumulate; + std::vector indices_; + bool indices_released_ = false; + torch::autograd::generated::TypeAndSize values_info; + +}; +#ifdef _WIN32 +struct IndexSelectBackward0 : public TraceableFunction { + TORCH_API IndexSelectBackward0() = default; +#else +struct TORCH_API IndexSelectBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "IndexSelectBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct LinalgInvExBackward0 : public TraceableFunction { + TORCH_API LinalgInvExBackward0() = default; +#else +struct TORCH_API LinalgInvExBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgInvExBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + inverse_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable inverse_; + +}; +#ifdef _WIN32 +struct LinalgPinvBackward0 : public TraceableFunction { + TORCH_API LinalgPinvBackward0() = default; +#else +struct TORCH_API LinalgPinvBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgPinvBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct KthvalueBackward0 : public TraceableFunction { + TORCH_API KthvalueBackward0() = default; +#else +struct TORCH_API KthvalueBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "KthvalueBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct LeBackward0 : public TraceableFunction { + TORCH_API LeBackward0() = default; +#else +struct TORCH_API LeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct LeBackward1 : public TraceableFunction { + TORCH_API LeBackward1() = default; +#else +struct TORCH_API LeBackward1 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct LerpBackward0 : public TraceableFunction { + TORCH_API LerpBackward0() = default; +#else +struct TORCH_API LerpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LerpBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar weight; + +}; +#ifdef _WIN32 +struct LerpBackward1 : public TraceableFunction { + TORCH_API LerpBackward1() = default; +#else +struct TORCH_API LerpBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LerpBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + end_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable end_; + SavedVariable self_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct LgammaBackward0 : public TraceableFunction { + TORCH_API LgammaBackward0() = default; +#else +struct TORCH_API LgammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + 
variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LgammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct DigammaBackward0 : public TraceableFunction { + TORCH_API DigammaBackward0() = default; +#else +struct TORCH_API DigammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DigammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct PolygammaBackward0 : public TraceableFunction { + TORCH_API PolygammaBackward0() = default; +#else +struct TORCH_API PolygammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PolygammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t n = 0; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct PolygammaBackward1 : public TraceableFunction { + TORCH_API PolygammaBackward1() = default; +#else +struct TORCH_API PolygammaBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; 
+ std::string name() const override { return "PolygammaBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t n = 0; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LogBackward0 : public TraceableFunction { + TORCH_API LogBackward0() = default; +#else +struct TORCH_API LogBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct Log10Backward0 : public TraceableFunction { + TORCH_API Log10Backward0() = default; +#else +struct TORCH_API Log10Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Log10Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct Log1PBackward0 : public TraceableFunction { + TORCH_API Log1PBackward0() = default; +#else +struct TORCH_API Log1PBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Log1PBackward0"; } + void release_variables() 
override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct Log2Backward0 : public TraceableFunction { + TORCH_API Log2Backward0() = default; +#else +struct TORCH_API Log2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Log2Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LogaddexpBackward0 : public TraceableFunction { + TORCH_API LogaddexpBackward0() = default; +#else +struct TORCH_API LogaddexpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogaddexpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct Logaddexp2Backward0 : public TraceableFunction { + TORCH_API Logaddexp2Backward0() = default; +#else +struct TORCH_API Logaddexp2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Logaddexp2Backward0"; } + void release_variables() override { + 
std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct XlogyBackward0 : public TraceableFunction { + TORCH_API XlogyBackward0() = default; +#else +struct TORCH_API XlogyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "XlogyBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct XlogyBackward1 : public TraceableFunction { + TORCH_API XlogyBackward1() = default; +#else +struct TORCH_API XlogyBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "XlogyBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + at::Scalar self; + +}; +#ifdef _WIN32 +struct XlogyBackward2 : public TraceableFunction { + TORCH_API XlogyBackward2() = default; +#else +struct TORCH_API XlogyBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "XlogyBackward2"; } + void release_variables() 
override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar other; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialXlog1PyBackward0 : public TraceableFunction { + TORCH_API SpecialXlog1PyBackward0() = default; +#else +struct TORCH_API SpecialXlog1PyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialXlog1PyBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialXlog1PyBackward1 : public TraceableFunction { + TORCH_API SpecialXlog1PyBackward1() = default; +#else +struct TORCH_API SpecialXlog1PyBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialXlog1PyBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + at::Scalar self; + +}; +#ifdef _WIN32 +struct SpecialXlog1PyBackward2 : public TraceableFunction { + TORCH_API SpecialXlog1PyBackward2() = default; +#else +struct TORCH_API SpecialXlog1PyBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + 
std::string name() const override { return "SpecialXlog1PyBackward2"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar other; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialZetaBackward0 : public TraceableFunction { + TORCH_API SpecialZetaBackward0() = default; +#else +struct TORCH_API SpecialZetaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialZetaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialZetaBackward1 : public TraceableFunction { + TORCH_API SpecialZetaBackward1() = default; +#else +struct TORCH_API SpecialZetaBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialZetaBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + at::Scalar self; + +}; +#ifdef _WIN32 +struct SpecialZetaBackward2 : public TraceableFunction { + TORCH_API SpecialZetaBackward2() = default; +#else +struct TORCH_API SpecialZetaBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; 
+ variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialZetaBackward2"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct LogNormalBackward0 : public TraceableFunction { + TORCH_API LogNormalBackward0() = default; +#else +struct TORCH_API LogNormalBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogNormalBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct LogsumexpBackward0 : public TraceableFunction { + TORCH_API LogsumexpBackward0() = default; +#else +struct TORCH_API LogsumexpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogsumexpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + bool keepdim; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LinalgLstsqBackward0 : public TraceableFunction { + TORCH_API LinalgLstsqBackward0() = default; +#else +struct TORCH_API LinalgLstsqBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"LinalgLstsqBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + b_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable b_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LtBackward0 : public TraceableFunction { + TORCH_API LtBackward0() = default; +#else +struct TORCH_API LtBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LtBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct LtBackward1 : public TraceableFunction { + TORCH_API LtBackward1() = default; +#else +struct TORCH_API LtBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LtBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct LinalgLuFactorExBackward0 : public TraceableFunction { + TORCH_API LinalgLuFactorExBackward0() = default; +#else +struct TORCH_API LinalgLuFactorExBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgLuFactorExBackward0"; } + void 
release_variables() override { + std::lock_guard lock(mutex_); + LU_.reset_data(); + pivots_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool pivot; + SavedVariable LU_; + SavedVariable pivots_; + +}; +#ifdef _WIN32 +struct LinalgLuBackward0 : public TraceableFunction { + TORCH_API LinalgLuBackward0() = default; +#else +struct TORCH_API LinalgLuBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgLuBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + L_.reset_data(); + P_.reset_data(); + U_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool pivot; + SavedVariable L_; + SavedVariable P_; + SavedVariable U_; + +}; +#ifdef _WIN32 +struct LinalgLuSolveBackward0 : public TraceableFunction { + TORCH_API LinalgLuSolveBackward0() = default; +#else +struct TORCH_API LinalgLuSolveBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgLuSolveBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + LU_.reset_data(); + pivots_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable LU_; + bool adjoint; + bool left; + SavedVariable pivots_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LuUnpackBackward0 : public TraceableFunction { + TORCH_API LuUnpackBackward0() = default; +#else +struct TORCH_API 
LuUnpackBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LuUnpackBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt LU_data_sym_argsize_minus_1; + c10::SymInt LU_data_sym_argsize_minus_2; + +}; +#ifdef _WIN32 +struct MaskedFillBackward0 : public TraceableFunction { + TORCH_API MaskedFillBackward0() = default; +#else +struct TORCH_API MaskedFillBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedFillBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct MaskedFillBackward1 : public TraceableFunction { + TORCH_API MaskedFillBackward1() = default; +#else +struct TORCH_API MaskedFillBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedFillBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct MaskedScatterBackward0 : public TraceableFunction { + TORCH_API MaskedScatterBackward0() = default; +#else +struct TORCH_API MaskedScatterBackward0 : public 
TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedScatterBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + std::vector source_sym_sizes; + +}; +#ifdef _WIN32 +struct MaskedScatterBackwardBackward0 : public TraceableFunction { + TORCH_API MaskedScatterBackwardBackward0() = default; +#else +struct TORCH_API MaskedScatterBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedScatterBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize grad_output_info; + SavedVariable mask_; + +}; +#ifdef _WIN32 +struct MaskedSelectBackward0 : public TraceableFunction { + TORCH_API MaskedSelectBackward0() = default; +#else +struct TORCH_API MaskedSelectBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedSelectBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct 
LinalgMatrixExpBackward0 : public TraceableFunction { + TORCH_API LinalgMatrixExpBackward0() = default; +#else +struct TORCH_API LinalgMatrixExpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgMatrixExpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MaxBackward0 : public TraceableFunction { + TORCH_API MaxBackward0() = default; +#else +struct TORCH_API MaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct MaxBackward1 : public TraceableFunction { + TORCH_API MaxBackward1() = default; +#else +struct TORCH_API MaxBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable 
result_; + +}; +#ifdef _WIN32 +struct MaximumBackward0 : public TraceableFunction { + TORCH_API MaximumBackward0() = default; +#else +struct TORCH_API MaximumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaximumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct FmaxBackward0 : public TraceableFunction { + TORCH_API FmaxBackward0() = default; +#else +struct TORCH_API FmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MeanBackward0 : public TraceableFunction { + TORCH_API MeanBackward0() = default; +#else +struct TORCH_API MeanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MeanBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt self_sym_numel; + std::vector self_sym_sizes; + +}; +#ifdef 
_WIN32 +struct MeanBackward1 : public TraceableFunction { + TORCH_API MeanBackward1() = default; +#else +struct TORCH_API MeanBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MeanBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray dim; + bool keepdim; + c10::SymInt self_sym_numel; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct MedianBackward0 : public TraceableFunction { + TORCH_API MedianBackward0() = default; +#else +struct TORCH_API MedianBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MedianBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct NanmedianBackward0 : public TraceableFunction { + TORCH_API NanmedianBackward0() = default; +#else +struct TORCH_API NanmedianBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NanmedianBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + 
SavedVariable result_; + +}; +#ifdef _WIN32 +struct MedianBackward1 : public TraceableFunction { + TORCH_API MedianBackward1() = default; +#else +struct TORCH_API MedianBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MedianBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct NanmedianBackward1 : public TraceableFunction { + TORCH_API NanmedianBackward1() = default; +#else +struct TORCH_API NanmedianBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NanmedianBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct MinBackward0 : public TraceableFunction { + TORCH_API MinBackward0() = default; +#else +struct TORCH_API MinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct MinBackward1 : public TraceableFunction { + TORCH_API MinBackward1() = default; +#else +struct TORCH_API MinBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MinBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MinimumBackward0 : public TraceableFunction { + TORCH_API MinimumBackward0() = default; +#else +struct TORCH_API MinimumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MinimumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct FminBackward0 : public TraceableFunction { + TORCH_API FminBackward0() = default; +#else +struct TORCH_API FminBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FminBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AmaxBackward0 : public TraceableFunction { + TORCH_API AmaxBackward0() = default; +#else +struct TORCH_API AmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + bool keepdim; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct AminBackward0 : public TraceableFunction { + TORCH_API AminBackward0() = default; +#else +struct TORCH_API AminBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AminBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + bool keepdim; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MmBackward0 : public TraceableFunction { + TORCH_API MmBackward0() = default; +#else +struct TORCH_API MmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MmBackward0"; } + void release_variables() override { + 
std::lock_guard lock(mutex_); + mat2_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mat2_; + at::Layout mat2_layout; + std::vector mat2_sym_sizes; + std::vector mat2_sym_strides; + SavedVariable self_; + at::Layout self_layout; + std::vector self_sym_sizes; + std::vector self_sym_strides; + +}; +#ifdef _WIN32 +struct ModeBackward0 : public TraceableFunction { + TORCH_API ModeBackward0() = default; +#else +struct TORCH_API ModeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ModeBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct MulBackward0 : public TraceableFunction { + TORCH_API MulBackward0() = default; +#else +struct TORCH_API MulBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MulBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + at::ScalarType other_scalar_type; + SavedVariable self_; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct MulBackward1 : public TraceableFunction { + TORCH_API MulBackward1() = 
default; +#else +struct TORCH_API MulBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MulBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar other; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct MvBackward0 : public TraceableFunction { + TORCH_API MvBackward0() = default; +#else +struct TORCH_API MvBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MvBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + vec_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable vec_; + +}; +#ifdef _WIN32 +struct MvlgammaBackward0 : public TraceableFunction { + TORCH_API MvlgammaBackward0() = default; +#else +struct TORCH_API MvlgammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MvlgammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t p = 0; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct NanToNumBackward0 : public TraceableFunction { + TORCH_API NanToNumBackward0() = default; +#else +struct TORCH_API NanToNumBackward0 : public 
TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NanToNumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct NativeBatchNormBackward0 : public TraceableFunction { + TORCH_API NativeBatchNormBackward0() = default; +#else +struct TORCH_API NativeBatchNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeBatchNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double eps; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable running_var_; + bool training; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NativeBatchNormLegitBackward0 : public TraceableFunction { + TORCH_API NativeBatchNormLegitBackward0() = default; +#else +struct TORCH_API NativeBatchNormLegitBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeBatchNormLegitBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + running_mean_.reset_data(); + 
running_var_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double eps; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable running_var_; + bool training; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NativeBatchNormLegitNoTrainingBackward0 : public TraceableFunction { + TORCH_API NativeBatchNormLegitNoTrainingBackward0() = default; +#else +struct TORCH_API NativeBatchNormLegitNoTrainingBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeBatchNormLegitNoTrainingBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double eps; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable running_var_; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NativeBatchNormLegitBackward1 : public TraceableFunction { + TORCH_API NativeBatchNormLegitBackward1() = default; +#else +struct TORCH_API NativeBatchNormLegitBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeBatchNormLegitBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + 
weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double eps; + SavedVariable input_; + bool training; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NativeBatchNormBackwardBackward0 : public TraceableFunction { + TORCH_API NativeBatchNormBackwardBackward0() = default; +#else +struct TORCH_API NativeBatchNormBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeBatchNormBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_out_.reset_data(); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + save_invstd_.reset_data(); + save_mean_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double eps; + SavedVariable grad_out_; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable running_var_; + SavedVariable save_invstd_; + SavedVariable save_mean_; + bool train; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct NativeLayerNormBackward0 : public TraceableFunction { + TORCH_API NativeLayerNormBackward0() = default; +#else +struct TORCH_API NativeLayerNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeLayerNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + bias_.reset_data(); + input_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + 
result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable bias_; + SavedVariable input_; + std::vector normalized_shape; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NativeLayerNormBackwardBackward0 : public TraceableFunction { + TORCH_API NativeLayerNormBackwardBackward0() = default; +#else +struct TORCH_API NativeLayerNormBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeLayerNormBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_out_.reset_data(); + input_.reset_data(); + mean_.reset_data(); + rstd_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_out_; + SavedVariable input_; + SavedVariable mean_; + std::vector normalized_shape; + SavedVariable rstd_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct NativeGroupNormBackward0 : public TraceableFunction { + TORCH_API NativeGroupNormBackward0() = default; +#else +struct TORCH_API NativeGroupNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NativeGroupNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + 
c10::SymInt C; + c10::SymInt HxW; + c10::SymInt N; + double eps; + int64_t group = 0; + SavedVariable input_; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct NeBackward0 : public TraceableFunction { + TORCH_API NeBackward0() = default; +#else +struct TORCH_API NeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct NeBackward1 : public TraceableFunction { + TORCH_API NeBackward1() = default; +#else +struct TORCH_API NeBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize other_info; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct NegBackward0 : public TraceableFunction { + TORCH_API NegBackward0() = default; +#else +struct TORCH_API NegBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NegBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; 
+#ifdef _WIN32 +struct NextafterBackward0 : public TraceableFunction { + TORCH_API NextafterBackward0() = default; +#else +struct TORCH_API NextafterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NextafterBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NormBackward0 : public TraceableFunction { + TORCH_API NormBackward0() = default; +#else +struct TORCH_API NormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar p; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct NormBackward1 : public TraceableFunction { + TORCH_API NormBackward1() = default; +#else +struct TORCH_API NormBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + bool keepdim; + c10::optional p; + SavedVariable self_; + SavedVariable result_; + +}; 
+#ifdef _WIN32 +struct NormBackward2 : public TraceableFunction { + TORCH_API NormBackward2() = default; +#else +struct TORCH_API NormBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormBackward2"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional p; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct NormBackward3 : public TraceableFunction { + TORCH_API NormBackward3() = default; +#else +struct TORCH_API NormBackward3 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormBackward3"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + bool keepdim; + c10::optional p; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LinalgVectorNormBackward0 : public TraceableFunction { + TORCH_API LinalgVectorNormBackward0() = default; +#else +struct TORCH_API LinalgVectorNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgVectorNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + 
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray dim; + bool keepdim; + at::Scalar ord; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PdistBackward0 : public TraceableFunction { + TORCH_API PdistBackward0() = default; +#else +struct TORCH_API PdistBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PdistBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double p; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PdistBackwardBackward0 : public TraceableFunction { + TORCH_API PdistBackwardBackward0() = default; +#else +struct TORCH_API PdistBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PdistBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct EuclideanDistBackward0 : public TraceableFunction { + TORCH_API EuclideanDistBackward0() = default; +#else +struct TORCH_API EuclideanDistBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EuclideanDistBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + x1_.reset_data(); + x2_.reset_data(); + 
result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable x1_; + SavedVariable x2_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CdistBackward0 : public TraceableFunction { + TORCH_API CdistBackward0() = default; +#else +struct TORCH_API CdistBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CdistBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + x1_.reset_data(); + x2_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double p; + SavedVariable x1_; + SavedVariable x2_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CdistBackwardBackward0 : public TraceableFunction { + TORCH_API CdistBackwardBackward0() = default; +#else +struct TORCH_API CdistBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CdistBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NormalBackward0 : public TraceableFunction { + TORCH_API NormalBackward0() = default; +#else +struct TORCH_API NormalBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormalBackward0"; } + void release_variables() override { + + + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NormalBackward1 : public TraceableFunction { + TORCH_API NormalBackward1() = default; +#else +struct TORCH_API NormalBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormalBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector mean_sym_sizes; + +}; +#ifdef _WIN32 +struct NormalBackward2 : public TraceableFunction { + TORCH_API NormalBackward2() = default; +#else +struct TORCH_API NormalBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormalBackward2"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector std_sym_sizes; + +}; +#ifdef _WIN32 +struct NormalBackward3 : public TraceableFunction { + TORCH_API NormalBackward3() = default; +#else +struct TORCH_API NormalBackward3 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NormalBackward3"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector mean_sym_sizes; + std::vector std_sym_sizes; + +}; +#ifdef _WIN32 +struct 
LinalgHouseholderProductBackward0 : public TraceableFunction { + TORCH_API LinalgHouseholderProductBackward0() = default; +#else +struct TORCH_API LinalgHouseholderProductBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgHouseholderProductBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + tau_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable input_; + SavedVariable tau_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct OrmqrBackward0 : public TraceableFunction { + TORCH_API OrmqrBackward0() = default; +#else +struct TORCH_API OrmqrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "OrmqrBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input2_.reset_data(); + input3_.reset_data(); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable input2_; + SavedVariable input3_; + bool left; + SavedVariable self_; + bool transpose; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PermuteBackward0 : public Node { + TORCH_API PermuteBackward0() = default; +#else +struct TORCH_API PermuteBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PermuteBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + 
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dims; + +}; +#ifdef _WIN32 +struct PoissonBackward0 : public TraceableFunction { + TORCH_API PoissonBackward0() = default; +#else +struct TORCH_API PoissonBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PoissonBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct PowBackward0 : public TraceableFunction { + TORCH_API PowBackward0() = default; +#else +struct TORCH_API PowBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PowBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar exponent; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct PowBackward1 : public TraceableFunction { + TORCH_API PowBackward1() = default; +#else +struct TORCH_API PowBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PowBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent_.reset_data(); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + SavedVariable exponent_; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PowBackward2 : public TraceableFunction { + TORCH_API PowBackward2() = default; +#else +struct TORCH_API PowBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PowBackward2"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable exponent_; + at::Scalar self; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ProdBackward0 : public TraceableFunction { + TORCH_API ProdBackward0() = default; +#else +struct TORCH_API ProdBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ProdBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ProdBackward1 : public TraceableFunction { + TORCH_API ProdBackward1() = default; +#else +struct TORCH_API ProdBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ProdBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& 
args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool keepdim; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PutBackward0 : public TraceableFunction { + TORCH_API PutBackward0() = default; +#else +struct TORCH_API PutBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PutBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + source_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool accumulate; + SavedVariable index_; + SavedVariable source_; + torch::autograd::generated::TypeAndSize source_info; + +}; +#ifdef _WIN32 +struct LinalgQrBackward0 : public TraceableFunction { + TORCH_API LinalgQrBackward0() = default; +#else +struct TORCH_API LinalgQrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgQrBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + Q_.reset_data(); + R_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::string mode; + SavedVariable Q_; + SavedVariable R_; + +}; +#ifdef _WIN32 +struct Rad2DegBackward0 : public TraceableFunction { + TORCH_API Rad2DegBackward0() = default; +#else +struct TORCH_API Rad2DegBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"Rad2DegBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RandomBackward0 : public TraceableFunction { + TORCH_API RandomBackward0() = default; +#else +struct TORCH_API RandomBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RandomBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RandomBackward1 : public TraceableFunction { + TORCH_API RandomBackward1() = default; +#else +struct TORCH_API RandomBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RandomBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RandomBackward2 : public TraceableFunction { + TORCH_API RandomBackward2() = default; +#else +struct TORCH_API RandomBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RandomBackward2"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ReciprocalBackward0 : public TraceableFunction { + 
TORCH_API ReciprocalBackward0() = default; +#else +struct TORCH_API ReciprocalBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReciprocalBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct RemainderBackward0 : public TraceableFunction { + TORCH_API RemainderBackward0() = default; +#else +struct TORCH_API RemainderBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RemainderBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RemainderBackward1 : public TraceableFunction { + TORCH_API RemainderBackward1() = default; +#else +struct TORCH_API RemainderBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RemainderBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct RenormBackward0 : public TraceableFunction { + TORCH_API RenormBackward0() = default; +#else +struct TORCH_API 
RenormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RenormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::Scalar maxnorm; + at::Scalar p; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct RepeatBackward0 : public TraceableFunction { + TORCH_API RepeatBackward0() = default; +#else +struct TORCH_API RepeatBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RepeatBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector repeats; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SpecialEntrBackward0 : public TraceableFunction { + TORCH_API SpecialEntrBackward0() = default; +#else +struct TORCH_API SpecialEntrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialEntrBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SpecialNdtriBackward0 : public TraceableFunction { + TORCH_API SpecialNdtriBackward0() = default; +#else +struct TORCH_API SpecialNdtriBackward0 : 
public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialNdtriBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SpecialLogNdtrBackward0 : public TraceableFunction { + TORCH_API SpecialLogNdtrBackward0() = default; +#else +struct TORCH_API SpecialLogNdtrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SpecialLogNdtrBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ReshapeAliasBackward0 : public Node { + TORCH_API ReshapeAliasBackward0() = default; +#else +struct TORCH_API ReshapeAliasBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReshapeAliasBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct RoundBackward0 : public TraceableFunction { + TORCH_API RoundBackward0() = default; +#else +struct TORCH_API RoundBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RoundBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RoundBackward1 : public TraceableFunction { + TORCH_API RoundBackward1() = default; +#else +struct TORCH_API RoundBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RoundBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct RsqrtBackward0 : public TraceableFunction { + TORCH_API RsqrtBackward0() = default; +#else +struct TORCH_API RsqrtBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RsqrtBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ScatterBackward0 : public TraceableFunction { + TORCH_API ScatterBackward0() = default; +#else +struct TORCH_API ScatterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScatterBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + 
index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + +}; +#ifdef _WIN32 +struct ScatterBackward1 : public TraceableFunction { + TORCH_API ScatterBackward1() = default; +#else +struct TORCH_API ScatterBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScatterBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + +}; +#ifdef _WIN32 +struct ScatterAddBackward0 : public TraceableFunction { + TORCH_API ScatterAddBackward0() = default; +#else +struct TORCH_API ScatterAddBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScatterAddBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable index_; + +}; +#ifdef _WIN32 +struct SelectBackward0 : public Node { + TORCH_API SelectBackward0() = default; +#else +struct TORCH_API SelectBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SelectBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SelectBackwardAutogradNestedTensor0 : public Node { + TORCH_API SelectBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API SelectBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SelectBackwardAutogradNestedTensor0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SelectBackwardBackward0 : public TraceableFunction { + TORCH_API SelectBackwardBackward0() = default; +#else +struct TORCH_API SelectBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SelectBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + +}; +#ifdef _WIN32 +struct SigmoidBackward0 : public TraceableFunction { + TORCH_API SigmoidBackward0() = default; +#else +struct TORCH_API SigmoidBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SigmoidBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LogitBackward0 : public TraceableFunction { + TORCH_API LogitBackward0() = default; +#else +struct TORCH_API LogitBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogitBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional eps; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SignBackward0 : public TraceableFunction { + TORCH_API SignBackward0() = default; +#else +struct TORCH_API SignBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SignBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct SgnBackward0 : public TraceableFunction { + TORCH_API SgnBackward0() = default; +#else +struct TORCH_API SgnBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SgnBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + 
SavedVariable result_; + +}; +#ifdef _WIN32 +struct SinBackward0 : public TraceableFunction { + TORCH_API SinBackward0() = default; +#else +struct TORCH_API SinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SincBackward0 : public TraceableFunction { + TORCH_API SincBackward0() = default; +#else +struct TORCH_API SincBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SincBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SinhBackward0 : public TraceableFunction { + TORCH_API SinhBackward0() = default; +#else +struct TORCH_API SinhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SinhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SliceBackward0 : public Node { + TORCH_API SliceBackward0() = default; 
+#else +struct TORCH_API SliceBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SliceBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::optional end; + std::vector self_sym_sizes; + c10::optional start; + c10::SymInt step; + +}; +#ifdef _WIN32 +struct SliceBackwardBackward0 : public TraceableFunction { + TORCH_API SliceBackwardBackward0() = default; +#else +struct TORCH_API SliceBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SliceBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt end; + c10::SymInt start; + c10::SymInt step; + +}; +#ifdef _WIN32 +struct SliceScatterBackward0 : public TraceableFunction { + TORCH_API SliceScatterBackward0() = default; +#else +struct TORCH_API SliceScatterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SliceScatterBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::optional end; + torch::autograd::generated::TypeAndSize src_info; + c10::optional start; + c10::SymInt step; + +}; +#ifdef _WIN32 +struct SelectScatterBackward0 : public TraceableFunction { + 
TORCH_API SelectScatterBackward0() = default; +#else +struct TORCH_API SelectScatterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SelectScatterBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + torch::autograd::generated::TypeAndSize src_info; + +}; +#ifdef _WIN32 +struct DiagonalScatterBackward0 : public TraceableFunction { + TORCH_API DiagonalScatterBackward0() = default; +#else +struct TORCH_API DiagonalScatterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DiagonalScatterBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim1 = 0; + int64_t dim2 = 0; + int64_t offset = 0; + torch::autograd::generated::TypeAndSize src_info; + +}; +#ifdef _WIN32 +struct AsStridedScatterBackward0 : public TraceableFunction { + TORCH_API AsStridedScatterBackward0() = default; +#else +struct TORCH_API AsStridedScatterBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsStridedScatterBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::TensorGeometry self_geometry; + std::vector size; + at::TensorGeometry 
src_geometry; + c10::optional storage_offset; + std::vector stride; + +}; +#ifdef _WIN32 +struct LinalgSolveExBackward0 : public TraceableFunction { + TORCH_API LinalgSolveExBackward0() = default; +#else +struct TORCH_API LinalgSolveExBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgSolveExBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + A_.reset_data(); + LU_.reset_data(); + pivots_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable A_; + bool left; + SavedVariable LU_; + SavedVariable pivots_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SortBackward0 : public TraceableFunction { + TORCH_API SortBackward0() = default; +#else +struct TORCH_API SortBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SortBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct SortBackward1 : public TraceableFunction { + TORCH_API SortBackward1() = default; +#else +struct TORCH_API SortBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SortBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + 
indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct SplitBackward0 : public Node { + TORCH_API SplitBackward0() = default; +#else +struct TORCH_API SplitBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + c10::SymInt split_size; + +}; +#ifdef _WIN32 +struct UnsafeSplitBackward0 : public TraceableFunction { + TORCH_API UnsafeSplitBackward0() = default; +#else +struct TORCH_API UnsafeSplitBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsafeSplitBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + c10::SymInt split_size; + +}; +#ifdef _WIN32 +struct SplitWithSizesBackward0 : public Node { + TORCH_API SplitWithSizesBackward0() = default; +#else +struct TORCH_API SplitWithSizesBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitWithSizesBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + std::vector split_sizes; + +}; +#ifdef _WIN32 +struct SplitWithSizesBackwardAutogradNestedTensor0 : public Node { + TORCH_API SplitWithSizesBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API SplitWithSizesBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitWithSizesBackwardAutogradNestedTensor0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + at::TensorOptions self_options; + std::vector split_sizes; + +}; +#ifdef _WIN32 +struct UnsafeSplitWithSizesBackward0 : public TraceableFunction { + TORCH_API UnsafeSplitWithSizesBackward0() = default; +#else +struct TORCH_API UnsafeSplitWithSizesBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsafeSplitWithSizesBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + std::vector split_sizes; + +}; +#ifdef _WIN32 +struct SqrtBackward0 : public TraceableFunction { + TORCH_API SqrtBackward0() = default; +#else +struct TORCH_API SqrtBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) 
override; + std::string name() const override { return "SqrtBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SqueezeBackward0 : public Node { + TORCH_API SqueezeBackward0() = default; +#else +struct TORCH_API SqueezeBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackward1 : public Node { + TORCH_API SqueezeBackward1() = default; +#else +struct TORCH_API SqueezeBackward1 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackwardAutogradNestedTensor0 : public Node { + TORCH_API SqueezeBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API SqueezeBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackwardAutogradNestedTensor0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct SqueezeBackward2 : public Node { + TORCH_API SqueezeBackward2() = default; +#else +struct TORCH_API SqueezeBackward2 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward2"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackwardAutogradNestedTensor1 : public Node { + TORCH_API SqueezeBackwardAutogradNestedTensor1() = default; +#else +struct TORCH_API SqueezeBackwardAutogradNestedTensor1 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackwardAutogradNestedTensor1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + int64_t self_dim = 0; + +}; +#ifdef _WIN32 +struct SqueezeBackward3 : public TraceableFunction { + TORCH_API SqueezeBackward3() = default; +#else +struct TORCH_API SqueezeBackward3 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward3"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackward4 : public TraceableFunction { + TORCH_API SqueezeBackward4() = default; +#else 
+struct TORCH_API SqueezeBackward4 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward4"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackward5 : public TraceableFunction { + TORCH_API SqueezeBackward5() = default; +#else +struct TORCH_API SqueezeBackward5 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward5"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct StdBackward0 : public TraceableFunction { + TORCH_API StdBackward0() = default; +#else +struct TORCH_API StdBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "StdBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional correction; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct StdMeanBackward0 : public TraceableFunction { + TORCH_API StdMeanBackward0() = default; +#else +struct TORCH_API 
StdMeanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "StdMeanBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result0_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional correction; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + SavedVariable result0_; + +}; +#ifdef _WIN32 +struct SubBackward0 : public TraceableFunction { + TORCH_API SubBackward0() = default; +#else +struct TORCH_API SubBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SubBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::ScalarType other_scalar_type; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct SubBackward1 : public TraceableFunction { + TORCH_API SubBackward1() = default; +#else +struct TORCH_API SubBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SubBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct RsubBackward0 : public TraceableFunction { + TORCH_API RsubBackward0() = default; +#else +struct TORCH_API RsubBackward0 : 
public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RsubBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::ScalarType other_scalar_type; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct RsubBackward1 : public TraceableFunction { + TORCH_API RsubBackward1() = default; +#else +struct TORCH_API RsubBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RsubBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct SumBackward0 : public TraceableFunction { + TORCH_API SumBackward0() = default; +#else +struct TORCH_API SumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SumBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SumBackward1 : public TraceableFunction { + TORCH_API SumBackward1() = default; +#else +struct TORCH_API SumBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const 
override { return "SumBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray dim; + bool keepdim; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SumBackwardAutogradNestedTensor0 : public TraceableFunction { + TORCH_API SumBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API SumBackwardAutogradNestedTensor0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SumBackwardAutogradNestedTensor0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct NansumBackward0 : public TraceableFunction { + TORCH_API NansumBackward0() = default; +#else +struct TORCH_API NansumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NansumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + at::ScalarType self_scalar_type; + +}; +#ifdef _WIN32 +struct LinalgSvdBackward0 : public TraceableFunction { + TORCH_API LinalgSvdBackward0() = default; +#else +struct TORCH_API LinalgSvdBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgSvdBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + S_.reset_data(); + U_.reset_data(); + Vh_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool full_matrices; + SavedVariable S_; + c10::SymInt S_sym_argsize_minus_1; + SavedVariable U_; + SavedVariable Vh_; + +}; +#ifdef _WIN32 +struct LinalgEighBackward0 : public TraceableFunction { + TORCH_API LinalgEighBackward0() = default; +#else +struct TORCH_API LinalgEighBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgEighBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + eigenvalues_.reset_data(); + eigenvectors_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable eigenvalues_; + SavedVariable eigenvectors_; + +}; +#ifdef _WIN32 +struct LinalgEigBackward0 : public TraceableFunction { + TORCH_API LinalgEigBackward0() = default; +#else +struct TORCH_API LinalgEigBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgEigBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + eigenvalues_.reset_data(); + eigenvectors_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::ScalarType self_scalar_type; + 
SavedVariable eigenvalues_; + SavedVariable eigenvectors_; + +}; +#ifdef _WIN32 +struct TBackward0 : public Node { + TORCH_API TBackward0() = default; +#else +struct TORCH_API TBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct TBackward1 : public TraceableFunction { + TORCH_API TBackward1() = default; +#else +struct TORCH_API TBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct FlipBackward0 : public TraceableFunction { + TORCH_API FlipBackward0() = default; +#else +struct TORCH_API FlipBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FlipBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dims; + +}; +#ifdef _WIN32 +struct RollBackward0 : public TraceableFunction { + TORCH_API RollBackward0() = default; +#else +struct TORCH_API RollBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { 
return "RollBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dims; + std::vector shifts; + +}; +#ifdef _WIN32 +struct Rot90Backward0 : public TraceableFunction { + TORCH_API Rot90Backward0() = default; +#else +struct TORCH_API Rot90Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Rot90Backward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dims; + int64_t k = 0; + +}; +#ifdef _WIN32 +struct TakeBackward0 : public TraceableFunction { + TORCH_API TakeBackward0() = default; +#else +struct TORCH_API TakeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TakeBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable index_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct TanBackward0 : public TraceableFunction { + TORCH_API TanBackward0() = default; +#else +struct TORCH_API TanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TanBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct TanhBackward0 : public TraceableFunction { + TORCH_API TanhBackward0() = default; +#else +struct TORCH_API TanhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TanhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct TopkBackward0 : public TraceableFunction { + TORCH_API TopkBackward0() = default; +#else +struct TORCH_API TopkBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TopkBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct TraceBackward0 : public TraceableFunction { + TORCH_API TraceBackward0() = default; +#else +struct TORCH_API TraceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TraceBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& 
inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct TransposeBackward0 : public Node { + TORCH_API TransposeBackward0() = default; +#else +struct TORCH_API TransposeBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TransposeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim0 = 0; + int64_t dim1 = 0; + +}; +#ifdef _WIN32 +struct TransposeBackward1 : public TraceableFunction { + TORCH_API TransposeBackward1() = default; +#else +struct TORCH_API TransposeBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TransposeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim0 = 0; + int64_t dim1 = 0; + +}; +#ifdef _WIN32 +struct TriangularSolveBackward0 : public TraceableFunction { + TORCH_API TriangularSolveBackward0() = default; +#else +struct TORCH_API TriangularSolveBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TriangularSolveBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + A_.reset_data(); + self_.reset_data(); + solution_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable A_; + SavedVariable self_; + bool 
transpose; + bool unitriangular; + bool upper; + SavedVariable solution_; + +}; +#ifdef _WIN32 +struct LinalgSolveTriangularBackward0 : public TraceableFunction { + TORCH_API LinalgSolveTriangularBackward0() = default; +#else +struct TORCH_API LinalgSolveTriangularBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinalgSolveTriangularBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool left; + SavedVariable self_; + bool unitriangular; + bool upper; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct TrilBackward0 : public TraceableFunction { + TORCH_API TrilBackward0() = default; +#else +struct TORCH_API TrilBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TrilBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t diagonal = 0; + +}; +#ifdef _WIN32 +struct TriuBackward0 : public TraceableFunction { + TORCH_API TriuBackward0() = default; +#else +struct TORCH_API TriuBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TriuBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; 
+ int64_t diagonal = 0; + +}; +#ifdef _WIN32 +struct TruncBackward0 : public TraceableFunction { + TORCH_API TruncBackward0() = default; +#else +struct TORCH_API TruncBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TruncBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ToDenseBackward0 : public TraceableFunction { + TORCH_API ToDenseBackward0() = default; +#else +struct TORCH_API ToDenseBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToDenseBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional masked_grad; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ToSparseBackward0 : public TraceableFunction { + TORCH_API ToSparseBackward0() = default; +#else +struct TORCH_API ToSparseBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToSparseBackward1 : public TraceableFunction { + TORCH_API 
ToSparseBackward1() = default; +#else +struct TORCH_API ToSparseBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToSparseCsrBackward0 : public TraceableFunction { + TORCH_API ToSparseCsrBackward0() = default; +#else +struct TORCH_API ToSparseCsrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseCsrBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToSparseCscBackward0 : public TraceableFunction { + TORCH_API ToSparseCscBackward0() = default; +#else +struct TORCH_API ToSparseCscBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseCscBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToSparseBsrBackward0 : public TraceableFunction { + TORCH_API ToSparseBsrBackward0() = default; 
+#else +struct TORCH_API ToSparseBsrBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseBsrBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToSparseBscBackward0 : public TraceableFunction { + TORCH_API ToSparseBscBackward0() = default; +#else +struct TORCH_API ToSparseBscBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToSparseBscBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Layout self_layout; + c10::OptionalArray self_self_sym_blocksize_opt; + +}; +#ifdef _WIN32 +struct ToMkldnnBackward0 : public TraceableFunction { + TORCH_API ToMkldnnBackward0() = default; +#else +struct TORCH_API ToMkldnnBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToMkldnnBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct UnfoldBackward0 : public Node { + TORCH_API UnfoldBackward0() = default; +#else +struct TORCH_API UnfoldBackward0 : public Node { 
+#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnfoldBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dimension = 0; + std::vector self_sym_sizes; + int64_t size = 0; + int64_t step = 0; + +}; +#ifdef _WIN32 +struct UnfoldBackwardBackward0 : public TraceableFunction { + TORCH_API UnfoldBackwardBackward0() = default; +#else +struct TORCH_API UnfoldBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnfoldBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + int64_t size = 0; + int64_t step = 0; + +}; +#ifdef _WIN32 +struct UniformBackward0 : public TraceableFunction { + TORCH_API UniformBackward0() = default; +#else +struct TORCH_API UniformBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UniformBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UniqueBackward0 : public TraceableFunction { + TORCH_API UniqueBackward0() = default; +#else +struct TORCH_API UniqueBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { 
return "UniqueBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UniqueDimBackward0 : public TraceableFunction { + TORCH_API UniqueDimBackward0() = default; +#else +struct TORCH_API UniqueDimBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UniqueDimBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UniqueConsecutiveBackward0 : public TraceableFunction { + TORCH_API UniqueConsecutiveBackward0() = default; +#else +struct TORCH_API UniqueConsecutiveBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UniqueConsecutiveBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UniqueDimConsecutiveBackward0 : public TraceableFunction { + TORCH_API UniqueDimConsecutiveBackward0() = default; +#else +struct TORCH_API UniqueDimConsecutiveBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UniqueDimConsecutiveBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct Unique2Backward0 : public TraceableFunction { + TORCH_API Unique2Backward0() = default; +#else +struct TORCH_API Unique2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Unique2Backward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UnsafeViewBackward0 : public TraceableFunction { + TORCH_API UnsafeViewBackward0() = default; +#else +struct TORCH_API UnsafeViewBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsafeViewBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct LiftBackward0 : public TraceableFunction { + TORCH_API LiftBackward0() = default; +#else +struct TORCH_API LiftBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LiftBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct LiftFreshBackward0 : public TraceableFunction { + TORCH_API LiftFreshBackward0() = default; +#else +struct TORCH_API LiftFreshBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LiftFreshBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UnsqueezeBackward0 : public Node { + TORCH_API UnsqueezeBackward0() = default; +#else +struct TORCH_API UnsqueezeBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsqueezeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct UnsqueezeBackward1 : public TraceableFunction { + TORCH_API UnsqueezeBackward1() = default; +#else +struct TORCH_API UnsqueezeBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsqueezeBackward1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct VarBackward0 : public TraceableFunction { + TORCH_API VarBackward0() = default; +#else +struct TORCH_API VarBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "VarBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional correction; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct VarMeanBackward0 : public TraceableFunction { + TORCH_API VarMeanBackward0() = default; +#else +struct TORCH_API VarMeanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "VarMeanBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional correction; + c10::OptionalArray dim; + bool keepdim; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ViewBackward0 : public Node { + TORCH_API ViewBackward0() = default; +#else +struct TORCH_API ViewBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct ViewBackwardAutogradNestedTensor0 : public Node { + TORCH_API ViewBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API ViewBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewBackwardAutogradNestedTensor0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ViewAsRealBackward0 : public Node { + TORCH_API ViewAsRealBackward0() = default; +#else +struct TORCH_API ViewAsRealBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewAsRealBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ViewAsComplexBackward0 : public Node { + TORCH_API ViewAsComplexBackward0() = default; +#else +struct TORCH_API ViewAsComplexBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewAsComplexBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct WhereBackward0 : public TraceableFunction { + TORCH_API WhereBackward0() = default; +#else +struct TORCH_API WhereBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "WhereBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + condition_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable condition_; + +}; +#ifdef _WIN32 +struct WeightNormInterfaceBackward0 : public TraceableFunction { + TORCH_API WeightNormInterfaceBackward0() = default; +#else +struct TORCH_API 
WeightNormInterfaceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "WeightNormInterfaceBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + g_.reset_data(); + v_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable g_; + SavedVariable v_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct ZeroBackward0 : public TraceableFunction { + TORCH_API ZeroBackward0() = default; +#else +struct TORCH_API ZeroBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ZeroBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct SparseMaskBackward0 : public TraceableFunction { + TORCH_API SparseMaskBackward0() = default; +#else +struct TORCH_API SparseMaskBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseMaskBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable mask_; + at::Layout self_layout; + +}; +#ifdef _WIN32 +struct SparseCooTensorWithDimsAndTensorsBackward0 : public TraceableFunction { + TORCH_API 
SparseCooTensorWithDimsAndTensorsBackward0() = default; +#else +struct TORCH_API SparseCooTensorWithDimsAndTensorsBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseCooTensorWithDimsAndTensorsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SparseCompressedTensorBackward0 : public TraceableFunction { + TORCH_API SparseCompressedTensorBackward0() = default; +#else +struct TORCH_API SparseCompressedTensorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseCompressedTensorBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + values_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable values_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SparseSumBackward0 : public TraceableFunction { + TORCH_API SparseSumBackward0() = default; +#else +struct TORCH_API SparseSumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseSumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) 
override; + std::vector dim; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct StandardGammaBackward0 : public TraceableFunction { + TORCH_API StandardGammaBackward0() = default; +#else +struct TORCH_API StandardGammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "StandardGammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct StandardGammaGradBackward0 : public TraceableFunction { + TORCH_API StandardGammaGradBackward0() = default; +#else +struct TORCH_API StandardGammaGradBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "StandardGammaGradBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ValuesBackward0 : public Node { + TORCH_API ValuesBackward0() = default; +#else +struct TORCH_API ValuesBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ValuesBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + std::vector self_sym_sizes; + +}; +#ifdef 
_WIN32 +struct ValuesBackwardAutogradNestedTensor0 : public Node { + TORCH_API ValuesBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API ValuesBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ValuesBackwardAutogradNestedTensor0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct TrilinearBackward0 : public TraceableFunction { + TORCH_API TrilinearBackward0() = default; +#else +struct TORCH_API TrilinearBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TrilinearBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + i1_.reset_data(); + i2_.reset_data(); + i3_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector expand1; + std::vector expand2; + std::vector expand3; + SavedVariable i1_; + SavedVariable i2_; + SavedVariable i3_; + std::vector sumdim; + +}; +#ifdef _WIN32 +struct ConstantPadNdBackward0 : public TraceableFunction { + TORCH_API ConstantPadNdBackward0() = default; +#else +struct TORCH_API ConstantPadNdBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConstantPadNdBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + std::vector pad; + +}; +#ifdef _WIN32 +struct BinaryCrossEntropyBackward0 : public TraceableFunction { + TORCH_API BinaryCrossEntropyBackward0() = default; +#else +struct TORCH_API BinaryCrossEntropyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BinaryCrossEntropyBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct BinaryCrossEntropyBackwardBackward0 : public TraceableFunction { + TORCH_API BinaryCrossEntropyBackwardBackward0() = default; +#else +struct TORCH_API BinaryCrossEntropyBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BinaryCrossEntropyBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct BinaryCrossEntropyWithLogitsBackward0 : public TraceableFunction { + TORCH_API BinaryCrossEntropyWithLogitsBackward0() = default; +#else +struct TORCH_API 
BinaryCrossEntropyWithLogitsBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "BinaryCrossEntropyWithLogitsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + pos_weight_.reset_data(); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable pos_weight_; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct EmbeddingBackward0 : public TraceableFunction { + TORCH_API EmbeddingBackward0() = default; +#else +struct TORCH_API EmbeddingBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EmbeddingBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + c10::SymInt padding_idx; + bool scale_grad_by_freq; + bool sparse; + c10::SymInt weight_sym_argsize_0; + +}; +#ifdef _WIN32 +struct EmbeddingDenseBackwardBackward0 : public TraceableFunction { + TORCH_API EmbeddingDenseBackwardBackward0() = default; +#else +struct TORCH_API EmbeddingDenseBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EmbeddingDenseBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } 
+ + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + c10::SymInt padding_idx; + +}; +#ifdef _WIN32 +struct EmbeddingBagBackward0 : public TraceableFunction { + TORCH_API EmbeddingBagBackward0() = default; +#else +struct TORCH_API EmbeddingBagBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EmbeddingBagBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + offsets_.reset_data(); + per_sample_weights_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + result3_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + int64_t mode = 0; + SavedVariable offsets_; + int64_t padding_idx = 0; + SavedVariable per_sample_weights_; + bool scale_grad_by_freq; + bool sparse; + SavedVariable weight_; + c10::SymInt weight_sym_argsize_0; + SavedVariable result1_; + SavedVariable result2_; + SavedVariable result3_; + +}; +#ifdef _WIN32 +struct EmbeddingRenormBackward0 : public TraceableFunction { + TORCH_API EmbeddingRenormBackward0() = default; +#else +struct TORCH_API EmbeddingRenormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EmbeddingRenormBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct MseLossBackward0 : public TraceableFunction { 
+ TORCH_API MseLossBackward0() = default; +#else +struct TORCH_API MseLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MseLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct MultiMarginLossBackward0 : public TraceableFunction { + TORCH_API MultiMarginLossBackward0() = default; +#else +struct TORCH_API MultiMarginLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MultiMarginLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar margin; + at::Scalar p; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MultilabelMarginLossBackward0 : public TraceableFunction { + TORCH_API MultilabelMarginLossBackward0() = default; +#else +struct TORCH_API MultilabelMarginLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MultilabelMarginLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + 
target_.reset_data(); + is_target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable is_target_; + +}; +#ifdef _WIN32 +struct NllLossBackward0 : public TraceableFunction { + TORCH_API NllLossBackward0() = default; +#else +struct TORCH_API NllLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NllLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + total_weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt ignore_index; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + SavedVariable total_weight_; + +}; +#ifdef _WIN32 +struct NllLoss2DBackward0 : public TraceableFunction { + TORCH_API NllLoss2DBackward0() = default; +#else +struct TORCH_API NllLoss2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NllLoss2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + weight_.reset_data(); + total_weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt ignore_index; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + SavedVariable weight_; + 
SavedVariable total_weight_; + +}; +#ifdef _WIN32 +struct SmoothL1LossBackward0 : public TraceableFunction { + TORCH_API SmoothL1LossBackward0() = default; +#else +struct TORCH_API SmoothL1LossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SmoothL1LossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double beta; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct HuberLossBackward0 : public TraceableFunction { + TORCH_API HuberLossBackward0() = default; +#else +struct TORCH_API HuberLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HuberLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double delta; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct SoftMarginLossBackward0 : public TraceableFunction { + TORCH_API SoftMarginLossBackward0() = default; +#else +struct TORCH_API SoftMarginLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftMarginLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + 
target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct ReluBackward0 : public TraceableFunction { + TORCH_API ReluBackward0() = default; +#else +struct TORCH_API ReluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SiluBackward0 : public TraceableFunction { + TORCH_API SiluBackward0() = default; +#else +struct TORCH_API SiluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SiluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MishBackward0 : public TraceableFunction { + TORCH_API MishBackward0() = default; +#else +struct TORCH_API MishBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MishBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct EluBackward0 : public TraceableFunction { + TORCH_API EluBackward0() = default; +#else +struct TORCH_API EluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar input_scale; + at::Scalar scale; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct EluBackward1 : public TraceableFunction { + TORCH_API EluBackward1() = default; +#else +struct TORCH_API EluBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EluBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar input_scale; + at::Scalar scale; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct CeluBackward0 : public TraceableFunction { + TORCH_API CeluBackward0() = default; +#else +struct TORCH_API CeluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CeluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } 
+ + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct CeluBackward1 : public TraceableFunction { + TORCH_API CeluBackward1() = default; +#else +struct TORCH_API CeluBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CeluBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct GeluBackward0 : public TraceableFunction { + TORCH_API GeluBackward0() = default; +#else +struct TORCH_API GeluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GeluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::string approximate; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct GeluBackwardBackward0 : public TraceableFunction { + TORCH_API GeluBackwardBackward0() = default; +#else +struct TORCH_API GeluBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GeluBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + 
self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::string approximate; + SavedVariable grad_output_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct GluBackward0 : public TraceableFunction { + TORCH_API GluBackward0() = default; +#else +struct TORCH_API GluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardshrinkBackward0 : public TraceableFunction { + TORCH_API HardshrinkBackward0() = default; +#else +struct TORCH_API HardshrinkBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardshrinkBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lambd; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardshrinkBackwardBackward0 : public TraceableFunction { + TORCH_API HardshrinkBackwardBackward0() = default; +#else +struct TORCH_API HardshrinkBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardshrinkBackwardBackward0"; } + void 
release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lambd; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardtanhBackward0 : public TraceableFunction { + TORCH_API HardtanhBackward0() = default; +#else +struct TORCH_API HardtanhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardtanhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar max_val; + at::Scalar min_val; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LeakyReluBackward0 : public TraceableFunction { + TORCH_API LeakyReluBackward0() = default; +#else +struct TORCH_API LeakyReluBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LeakyReluBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar negative_slope; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LeakyReluBackward1 : public TraceableFunction { + TORCH_API LeakyReluBackward1() = default; +#else +struct TORCH_API LeakyReluBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"LeakyReluBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar negative_slope; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct LogSigmoidBackward0 : public TraceableFunction { + TORCH_API LogSigmoidBackward0() = default; +#else +struct TORCH_API LogSigmoidBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogSigmoidBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + buffer_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable buffer_; + +}; +#ifdef _WIN32 +struct LogSoftmaxBackward0 : public TraceableFunction { + TORCH_API LogSoftmaxBackward0() = default; +#else +struct TORCH_API LogSoftmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogSoftmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::ScalarType self_scalar_type; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SparseLogSoftmaxBackward0 : public TraceableFunction { + TORCH_API SparseLogSoftmaxBackward0() = default; +#else +struct TORCH_API SparseLogSoftmaxBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseLogSoftmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MaskedSoftmaxBackward0 : public TraceableFunction { + TORCH_API MaskedSoftmaxBackward0() = default; +#else +struct TORCH_API MaskedSoftmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaskedSoftmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mask_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::optional dim; + SavedVariable mask_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PreluKernelBackward0 : public TraceableFunction { + TORCH_API PreluKernelBackward0() = default; +#else +struct TORCH_API PreluKernelBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PreluKernelBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct 
PreluKernelBackwardBackward0 : public TraceableFunction { + TORCH_API PreluKernelBackwardBackward0() = default; +#else +struct TORCH_API PreluKernelBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PreluKernelBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + at::TensorOptions grad_output_options; + SavedVariable self_; + torch::autograd::generated::TypeAndSize self_info; + at::TensorOptions self_options; + SavedVariable weight_; + at::TensorOptions weight_options; + +}; +#ifdef _WIN32 +struct RreluWithNoiseBackward0 : public TraceableFunction { + TORCH_API RreluWithNoiseBackward0() = default; +#else +struct TORCH_API RreluWithNoiseBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "RreluWithNoiseBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + noise_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lower; + SavedVariable noise_; + SavedVariable self_; + bool training; + at::Scalar upper; + +}; +#ifdef _WIN32 +struct RreluWithNoiseBackward1 : public TraceableFunction { + TORCH_API RreluWithNoiseBackward1() = default; +#else +struct TORCH_API RreluWithNoiseBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "RreluWithNoiseBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + noise_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lower; + SavedVariable noise_; + bool training; + at::Scalar upper; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SoftmaxBackward0 : public TraceableFunction { + TORCH_API SoftmaxBackward0() = default; +#else +struct TORCH_API SoftmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::ScalarType self_scalar_type; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SparseSoftmaxBackward0 : public TraceableFunction { + TORCH_API SparseSoftmaxBackward0() = default; +#else +struct TORCH_API SparseSoftmaxBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseSoftmaxBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct SparseSparseMatmulBackward0 : public 
TraceableFunction { + TORCH_API SparseSparseMatmulBackward0() = default; +#else +struct TORCH_API SparseSparseMatmulBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseSparseMatmulBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SoftplusBackward0 : public TraceableFunction { + TORCH_API SoftplusBackward0() = default; +#else +struct TORCH_API SoftplusBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftplusBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar beta; + SavedVariable self_; + at::Scalar threshold; + +}; +#ifdef _WIN32 +struct SoftshrinkBackward0 : public TraceableFunction { + TORCH_API SoftshrinkBackward0() = default; +#else +struct TORCH_API SoftshrinkBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftshrinkBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lambd; + 
SavedVariable self_; + +}; +#ifdef _WIN32 +struct ThresholdBackward0 : public TraceableFunction { + TORCH_API ThresholdBackward0() = default; +#else +struct TORCH_API ThresholdBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ThresholdBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + at::Scalar threshold; + +}; +#ifdef _WIN32 +struct ThresholdBackward1 : public TraceableFunction { + TORCH_API ThresholdBackward1() = default; +#else +struct TORCH_API ThresholdBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ThresholdBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + at::Scalar threshold; + +}; +#ifdef _WIN32 +struct ReflectionPad1DBackward0 : public TraceableFunction { + TORCH_API ReflectionPad1DBackward0() = default; +#else +struct TORCH_API ReflectionPad1DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad1DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& 
saved) override; + std::vector padding; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ReflectionPad2DBackward0 : public TraceableFunction { + TORCH_API ReflectionPad2DBackward0() = default; +#else +struct TORCH_API ReflectionPad2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ReflectionPad3DBackward0 : public TraceableFunction { + TORCH_API ReflectionPad3DBackward0() = default; +#else +struct TORCH_API ReflectionPad3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ReplicationPad1DBackward0 : public TraceableFunction { + TORCH_API ReplicationPad1DBackward0() = default; +#else +struct TORCH_API ReplicationPad1DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad1DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ReplicationPad2DBackward0 : public TraceableFunction { + TORCH_API ReplicationPad2DBackward0() = default; +#else +struct TORCH_API ReplicationPad2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ReplicationPad3DBackward0 : public TraceableFunction { + TORCH_API ReplicationPad3DBackward0() = default; +#else +struct TORCH_API ReplicationPad3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct UpsampleLinear1DBackward0 : public TraceableFunction { + TORCH_API UpsampleLinear1DBackward0() = default; +#else +struct TORCH_API UpsampleLinear1DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleLinear1DBackward0"; } + void release_variables() override { + + + } + 
+ void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleBilinear2DBackward0 : public TraceableFunction { + TORCH_API UpsampleBilinear2DBackward0() = default; +#else +struct TORCH_API UpsampleBilinear2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBilinear2DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleBilinear2DAaBackward0 : public TraceableFunction { + TORCH_API UpsampleBilinear2DAaBackward0() = default; +#else +struct TORCH_API UpsampleBilinear2DAaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBilinear2DAaBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleBicubic2DBackward0 : public TraceableFunction { + TORCH_API UpsampleBicubic2DBackward0() = default; +#else +struct TORCH_API UpsampleBicubic2DBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBicubic2DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleBicubic2DAaBackward0 : public TraceableFunction { + TORCH_API UpsampleBicubic2DAaBackward0() = default; +#else +struct TORCH_API UpsampleBicubic2DAaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBicubic2DAaBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleTrilinear3DBackward0 : public TraceableFunction { + TORCH_API UpsampleTrilinear3DBackward0() = default; +#else +struct TORCH_API UpsampleTrilinear3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleTrilinear3DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + 
std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleNearest1DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest1DBackward0() = default; +#else +struct TORCH_API UpsampleNearest1DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest1DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleNearestExact1DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearestExact1DBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact1DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearestExact1DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleNearest2DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest2DBackward0() = default; +#else +struct TORCH_API UpsampleNearest2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest2DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleNearestExact2DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearestExact2DBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearestExact2DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleNearest3DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest3DBackward0() = default; +#else +struct TORCH_API UpsampleNearest3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest3DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct UpsampleNearestExact3DBackward0 : public TraceableFunction { + TORCH_API UpsampleNearestExact3DBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override 
{ return "UpsampleNearestExact3DBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct PixelShuffleBackward0 : public TraceableFunction { + TORCH_API PixelShuffleBackward0() = default; +#else +struct TORCH_API PixelShuffleBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PixelShuffleBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t upscale_factor = 0; + +}; +#ifdef _WIN32 +struct PixelUnshuffleBackward0 : public TraceableFunction { + TORCH_API PixelUnshuffleBackward0() = default; +#else +struct TORCH_API PixelUnshuffleBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PixelUnshuffleBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t downscale_factor = 0; + +}; +#ifdef _WIN32 +struct AdaptiveAvgPool2DBackward0 : public TraceableFunction { + TORCH_API AdaptiveAvgPool2DBackward0() = default; +#else +struct TORCH_API AdaptiveAvgPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"AdaptiveAvgPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AdaptiveAvgPool3DBackward0 : public TraceableFunction { + TORCH_API AdaptiveAvgPool3DBackward0() = default; +#else +struct TORCH_API AdaptiveAvgPool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveAvgPool3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct AdaptiveMaxPool2DBackward0 : public TraceableFunction { + TORCH_API AdaptiveMaxPool2DBackward0() = default; +#else +struct TORCH_API AdaptiveMaxPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveMaxPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct AdaptiveMaxPool3DBackward0 : public TraceableFunction { + TORCH_API AdaptiveMaxPool3DBackward0() = default; +#else +struct TORCH_API AdaptiveMaxPool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + 
variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveMaxPool3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct AvgPool2DBackward0 : public TraceableFunction { + TORCH_API AvgPool2DBackward0() = default; +#else +struct TORCH_API AvgPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AvgPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + bool count_include_pad; + c10::optional divisor_override; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + +}; +#ifdef _WIN32 +struct AvgPool3DBackward0 : public TraceableFunction { + TORCH_API AvgPool3DBackward0() = default; +#else +struct TORCH_API AvgPool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AvgPool3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + bool count_include_pad; + c10::optional divisor_override; + std::vector kernel_size; + std::vector 
padding; + SavedVariable self_; + std::vector stride; + +}; +#ifdef _WIN32 +struct FractionalMaxPool2DBackward0 : public TraceableFunction { + TORCH_API FractionalMaxPool2DBackward0() = default; +#else +struct TORCH_API FractionalMaxPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FractionalMaxPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector kernel_size; + std::vector output_size; + SavedVariable self_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct FractionalMaxPool3DBackward0 : public TraceableFunction { + TORCH_API FractionalMaxPool3DBackward0() = default; +#else +struct TORCH_API FractionalMaxPool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FractionalMaxPool3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector kernel_size; + std::vector output_size; + SavedVariable self_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct LinearBackward0 : public TraceableFunction { + TORCH_API LinearBackward0() = default; +#else +struct TORCH_API LinearBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinearBackward0"; } + 
void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable input_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct LinearBackwardBackward0 : public TraceableFunction { + TORCH_API LinearBackwardBackward0() = default; +#else +struct TORCH_API LinearBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LinearBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + SavedVariable self_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MaxPool2DBackward0 : public TraceableFunction { + TORCH_API MaxPool2DBackward0() = default; +#else +struct TORCH_API MaxPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + +}; +#ifdef _WIN32 +struct MpsConvolutionBackward0 : public TraceableFunction { + TORCH_API MpsConvolutionBackward0() = default; +#else 
+struct TORCH_API MpsConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MpsConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + c10::SymInt groups; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MpsConvolutionBackwardBackward0 : public TraceableFunction { + TORCH_API MpsConvolutionBackwardBackward0() = default; +#else +struct TORCH_API MpsConvolutionBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MpsConvolutionBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + SavedVariable grad_output_; + c10::SymInt groups; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MaxPool2DWithIndicesBackward0 : public TraceableFunction { + TORCH_API MaxPool2DWithIndicesBackward0() = default; +#else +struct TORCH_API MaxPool2DWithIndicesBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool2DWithIndicesBackward0"; } + void 
release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct MaxPool3DWithIndicesBackward0 : public TraceableFunction { + TORCH_API MaxPool3DWithIndicesBackward0() = default; +#else +struct TORCH_API MaxPool3DWithIndicesBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool3DWithIndicesBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct MaxUnpool2DBackward0 : public TraceableFunction { + TORCH_API MaxUnpool2DBackward0() = default; +#else +struct TORCH_API MaxUnpool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxUnpool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct MaxUnpool3DBackward0 
: public TraceableFunction { + TORCH_API MaxUnpool3DBackward0() = default; +#else +struct TORCH_API MaxUnpool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxUnpool3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + +}; +#ifdef _WIN32 +struct ConvolutionBackward0 : public TraceableFunction { + TORCH_API ConvolutionBackward0() = default; +#else +struct TORCH_API ConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + SavedVariable input_; + std::vector output_padding; + std::vector padding; + std::vector stride; + bool transposed; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvolutionBackward1 : public TraceableFunction { + TORCH_API ConvolutionBackward1() = default; +#else +struct TORCH_API ConvolutionBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvolutionBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + } 
+ + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + SavedVariable input_; + std::vector output_padding; + std::vector padding; + std::vector stride; + bool transposed; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvolutionBackwardBackward0 : public TraceableFunction { + TORCH_API ConvolutionBackwardBackward0() = default; +#else +struct TORCH_API ConvolutionBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvolutionBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + SavedVariable grad_output_; + c10::SymInt groups; + SavedVariable input_; + std::vector output_padding; + std::vector padding; + std::vector stride; + bool transposed; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvolutionOverrideableBackward0 : public TraceableFunction { + TORCH_API ConvolutionOverrideableBackward0() = default; +#else +struct TORCH_API ConvolutionOverrideableBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvolutionOverrideableBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + std::vector dilation; + c10::SymInt groups; + SavedVariable input_; + std::vector output_padding; + std::vector padding; + std::vector stride; + bool transposed; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvolutionBackwardOverrideableBackward0 : public TraceableFunction { + TORCH_API ConvolutionBackwardOverrideableBackward0() = default; +#else +struct TORCH_API ConvolutionBackwardOverrideableBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvolutionBackwardOverrideableBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + SavedVariable grad_output_; + c10::SymInt groups; + SavedVariable input_; + std::vector output_padding; + std::vector padding; + std::vector stride; + bool transposed; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConvTranspose2DBackward0 : public TraceableFunction { + TORCH_API SlowConvTranspose2DBackward0() = default; +#else +struct TORCH_API SlowConvTranspose2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConvTranspose2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector output_padding; + 
std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConvTranspose3DBackward0 : public TraceableFunction { + TORCH_API SlowConvTranspose3DBackward0() = default; +#else +struct TORCH_API SlowConvTranspose3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConvTranspose3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector output_padding; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConv2DBackward0 : public TraceableFunction { + TORCH_API SlowConv2DBackward0() = default; +#else +struct TORCH_API SlowConv2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConv2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConv2DBackwardBackward0 : public TraceableFunction { + TORCH_API SlowConv2DBackwardBackward0() = default; +#else +struct TORCH_API SlowConv2DBackwardBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConv2DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvDepthwise2DBackward0 : public TraceableFunction { + TORCH_API ConvDepthwise2DBackward0() = default; +#else +struct TORCH_API ConvDepthwise2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvDepthwise2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct ConvDepthwise3DBackward0 : public TraceableFunction { + TORCH_API ConvDepthwise3DBackward0() = default; +#else +struct TORCH_API ConvDepthwise3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ConvDepthwise3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConv3DBackward0 : public TraceableFunction { + TORCH_API SlowConv3DBackward0() = default; +#else +struct TORCH_API SlowConv3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConv3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConvDilated2DBackward0 : public TraceableFunction { + TORCH_API SlowConvDilated2DBackward0() = default; +#else +struct TORCH_API SlowConvDilated2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConvDilated2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct SlowConvDilated3DBackward0 : public TraceableFunction { + TORCH_API 
SlowConvDilated3DBackward0() = default; +#else +struct TORCH_API SlowConvDilated3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SlowConvDilated3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct Col2ImBackward0 : public TraceableFunction { + TORCH_API Col2ImBackward0() = default; +#else +struct TORCH_API Col2ImBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Col2ImBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + std::vector stride; + +}; +#ifdef _WIN32 +struct Im2ColBackward0 : public TraceableFunction { + TORCH_API Im2ColBackward0() = default; +#else +struct TORCH_API Im2ColBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "Im2ColBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + std::vector 
kernel_size; + std::vector padding; + c10::SymInt self_sym_argsize_minus_1; + c10::SymInt self_sym_argsize_minus_2; + std::vector stride; + +}; +#ifdef _WIN32 +struct AdaptiveAvgPool2DBackwardBackward0 : public TraceableFunction { + TORCH_API AdaptiveAvgPool2DBackwardBackward0() = default; +#else +struct TORCH_API AdaptiveAvgPool2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveAvgPool2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt grad_output_sym_argsize_minus_1; + c10::SymInt grad_output_sym_argsize_minus_2; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct AdaptiveAvgPool3DBackwardBackward0 : public TraceableFunction { + TORCH_API AdaptiveAvgPool3DBackwardBackward0() = default; +#else +struct TORCH_API AdaptiveAvgPool3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveAvgPool3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt grad_output_sym_argsize_minus_1; + c10::SymInt grad_output_sym_argsize_minus_2; + c10::SymInt grad_output_sym_argsize_minus_3; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct AdaptiveMaxPool2DBackwardBackward0 : public TraceableFunction { + TORCH_API AdaptiveMaxPool2DBackwardBackward0() = default; +#else +struct TORCH_API AdaptiveMaxPool2DBackwardBackward0 : public TraceableFunction { 
+#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveMaxPool2DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct AdaptiveMaxPool3DBackwardBackward0 : public TraceableFunction { + TORCH_API AdaptiveMaxPool3DBackwardBackward0() = default; +#else +struct TORCH_API AdaptiveMaxPool3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AdaptiveMaxPool3DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct AvgPool2DBackwardBackward0 : public TraceableFunction { + TORCH_API AvgPool2DBackwardBackward0() = default; +#else +struct TORCH_API AvgPool2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AvgPool2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + bool count_include_pad; + c10::optional divisor_override; + std::vector 
kernel_size; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + std::vector stride; + +}; +#ifdef _WIN32 +struct AvgPool3DBackwardBackward0 : public TraceableFunction { + TORCH_API AvgPool3DBackwardBackward0() = default; +#else +struct TORCH_API AvgPool3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AvgPool3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + bool count_include_pad; + c10::optional divisor_override; + std::vector kernel_size; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + std::vector stride; + +}; +#ifdef _WIN32 +struct EluBackwardBackward0 : public TraceableFunction { + TORCH_API EluBackwardBackward0() = default; +#else +struct TORCH_API EluBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EluBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_or_result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable grad_output_; + at::Scalar input_scale; + bool is_result; + at::Scalar scale; + SavedVariable self_or_result_; + +}; +#ifdef _WIN32 +struct FractionalMaxPool2DBackwardBackward0 : public TraceableFunction { + TORCH_API FractionalMaxPool2DBackwardBackward0() = default; +#else +struct TORCH_API FractionalMaxPool2DBackwardBackward0 : public TraceableFunction { +#endif + 
using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FractionalMaxPool2DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct FractionalMaxPool3DBackwardBackward0 : public TraceableFunction { + TORCH_API FractionalMaxPool3DBackwardBackward0() = default; +#else +struct TORCH_API FractionalMaxPool3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FractionalMaxPool3DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct GluBackwardBackward0 : public TraceableFunction { + TORCH_API GluBackwardBackward0() = default; +#else +struct TORCH_API GluBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "GluBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable 
grad_output_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct HardtanhBackwardBackward0 : public TraceableFunction { + TORCH_API HardtanhBackwardBackward0() = default; +#else +struct TORCH_API HardtanhBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HardtanhBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar max_val; + at::Scalar min_val; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LogSigmoidBackwardBackward0 : public TraceableFunction { + TORCH_API LogSigmoidBackwardBackward0() = default; +#else +struct TORCH_API LogSigmoidBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogSigmoidBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + buffer_.reset_data(); + grad_output_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable buffer_; + SavedVariable grad_output_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct LogSoftmaxBackwardDataBackward0 : public TraceableFunction { + TORCH_API LogSoftmaxBackwardDataBackward0() = default; +#else +struct TORCH_API LogSoftmaxBackwardDataBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LogSoftmaxBackwardDataBackward0"; } + void release_variables() 
override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + output_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable grad_output_; + SavedVariable output_; + +}; +#ifdef _WIN32 +struct LeakyReluBackwardBackward0 : public TraceableFunction { + TORCH_API LeakyReluBackwardBackward0() = default; +#else +struct TORCH_API LeakyReluBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LeakyReluBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar negative_slope; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MaxPool2DBackwardBackward0 : public TraceableFunction { + TORCH_API MaxPool2DBackwardBackward0() = default; +#else +struct TORCH_API MaxPool2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct MaxPool2DWithIndicesBackwardBackward0 : public TraceableFunction { + TORCH_API MaxPool2DWithIndicesBackwardBackward0() = default; +#else +struct TORCH_API MaxPool2DWithIndicesBackwardBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool2DWithIndicesBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct MaxPool3DWithIndicesBackwardBackward0 : public TraceableFunction { + TORCH_API MaxPool3DWithIndicesBackwardBackward0() = default; +#else +struct TORCH_API MaxPool3DWithIndicesBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MaxPool3DWithIndicesBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + indices_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable indices_; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct MseLossBackwardBackward0 : public TraceableFunction { + TORCH_API MseLossBackwardBackward0() = default; +#else +struct TORCH_API MseLossBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MseLossBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + 
SavedVariable grad_output_; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct NllLossBackwardBackward0 : public TraceableFunction { + TORCH_API NllLossBackwardBackward0() = default; +#else +struct TORCH_API NllLossBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NllLossBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + target_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt ignore_index; + int64_t reduction = 0; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct NllLoss2DBackwardBackward0 : public TraceableFunction { + TORCH_API NllLoss2DBackwardBackward0() = default; +#else +struct TORCH_API NllLoss2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NllLoss2DBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + target_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::SymInt ignore_index; + int64_t reduction = 0; + SavedVariable target_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct RreluWithNoiseBackwardBackward0 : public TraceableFunction { + TORCH_API RreluWithNoiseBackwardBackward0() = default; +#else +struct TORCH_API RreluWithNoiseBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) 
override; + std::string name() const override { return "RreluWithNoiseBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + noise_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lower; + SavedVariable noise_; + SavedVariable self_; + bool training; + at::Scalar upper; + +}; +#ifdef _WIN32 +struct ReflectionPad1DBackwardBackward0 : public TraceableFunction { + TORCH_API ReflectionPad1DBackwardBackward0() = default; +#else +struct TORCH_API ReflectionPad1DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad1DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct ReflectionPad2DBackwardBackward0 : public TraceableFunction { + TORCH_API ReflectionPad2DBackwardBackward0() = default; +#else +struct TORCH_API ReflectionPad2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct ReflectionPad3DBackwardBackward0 : public TraceableFunction { + TORCH_API 
ReflectionPad3DBackwardBackward0() = default; +#else +struct TORCH_API ReflectionPad3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReflectionPad3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct ReplicationPad1DBackwardBackward0 : public TraceableFunction { + TORCH_API ReplicationPad1DBackwardBackward0() = default; +#else +struct TORCH_API ReplicationPad1DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad1DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct ReplicationPad2DBackwardBackward0 : public TraceableFunction { + TORCH_API ReplicationPad2DBackwardBackward0() = default; +#else +struct TORCH_API ReplicationPad2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + 
torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct ReplicationPad3DBackwardBackward0 : public TraceableFunction { + TORCH_API ReplicationPad3DBackwardBackward0() = default; +#else +struct TORCH_API ReplicationPad3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReplicationPad3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector padding; + torch::autograd::generated::TypeAndSize self_info; + +}; +#ifdef _WIN32 +struct SparseSampledAddmmBackward0 : public TraceableFunction { + TORCH_API SparseSampledAddmmBackward0() = default; +#else +struct TORCH_API SparseSampledAddmmBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseSampledAddmmBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + mat1_.reset_data(); + mat2_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + at::Scalar beta; + SavedVariable mat1_; + SavedVariable mat2_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SparseMmReduceImplBackward0 : public TraceableFunction { + TORCH_API SparseMmReduceImplBackward0() = default; +#else +struct TORCH_API SparseMmReduceImplBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SparseMmReduceImplBackward0"; } + void 
release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + std::string reduce; + SavedVariable self_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct SmoothL1LossBackwardBackward0 : public TraceableFunction { + TORCH_API SmoothL1LossBackwardBackward0() = default; +#else +struct TORCH_API SmoothL1LossBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SmoothL1LossBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double beta; + SavedVariable grad_output_; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct HuberLossBackwardBackward0 : public TraceableFunction { + TORCH_API HuberLossBackwardBackward0() = default; +#else +struct TORCH_API HuberLossBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "HuberLossBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double delta; + SavedVariable grad_output_; + int64_t 
reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct SoftplusBackwardBackward0 : public TraceableFunction { + TORCH_API SoftplusBackwardBackward0() = default; +#else +struct TORCH_API SoftplusBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftplusBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar beta; + SavedVariable grad_output_; + SavedVariable self_; + at::Scalar threshold; + +}; +#ifdef _WIN32 +struct SoftmaxBackwardDataBackward0 : public TraceableFunction { + TORCH_API SoftmaxBackwardDataBackward0() = default; +#else +struct TORCH_API SoftmaxBackwardDataBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftmaxBackwardDataBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + output_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable grad_output_; + at::ScalarType input_dtype; + SavedVariable output_; + +}; +#ifdef _WIN32 +struct SoftMarginLossBackwardBackward0 : public TraceableFunction { + TORCH_API SoftMarginLossBackwardBackward0() = default; +#else +struct TORCH_API SoftMarginLossBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string 
name() const override { return "SoftMarginLossBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + self_.reset_data(); + target_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + int64_t reduction = 0; + SavedVariable self_; + SavedVariable target_; + +}; +#ifdef _WIN32 +struct SoftshrinkBackwardBackward0 : public TraceableFunction { + TORCH_API SoftshrinkBackwardBackward0() = default; +#else +struct TORCH_API SoftshrinkBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SoftshrinkBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar lambd; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ThresholdBackwardBackward0 : public TraceableFunction { + TORCH_API ThresholdBackwardBackward0() = default; +#else +struct TORCH_API ThresholdBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ThresholdBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + at::Scalar threshold; + +}; +#ifdef _WIN32 +struct UpsampleLinear1DBackwardBackward0 : public TraceableFunction { + TORCH_API 
UpsampleLinear1DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleLinear1DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleLinear1DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales; + +}; +#ifdef _WIN32 +struct UpsampleBilinear2DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleBilinear2DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleBilinear2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBilinear2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleBilinear2DAaBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleBilinear2DAaBackwardBackward0() = default; +#else +struct TORCH_API UpsampleBilinear2DAaBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBilinear2DAaBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + 
bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleBicubic2DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleBicubic2DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleBicubic2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBicubic2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleBicubic2DAaBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleBicubic2DAaBackwardBackward0() = default; +#else +struct TORCH_API UpsampleBicubic2DAaBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleBicubic2DAaBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleTrilinear3DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleTrilinear3DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleTrilinear3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"UpsampleTrilinear3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool align_corners; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleNearest1DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest1DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearest1DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest1DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales; + +}; +#ifdef _WIN32 +struct UpsampleNearestExact1DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleNearestExact1DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact1DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearestExact1DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales; + +}; +#ifdef _WIN32 +struct UpsampleNearest2DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest2DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearest2DBackwardBackward0 : public 
TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleNearestExact2DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleNearestExact2DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact2DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearestExact2DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct UpsampleNearest3DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleNearest3DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearest3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearest3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 
+struct UpsampleNearestExact3DBackwardBackward0 : public TraceableFunction { + TORCH_API UpsampleNearestExact3DBackwardBackward0() = default; +#else +struct TORCH_API UpsampleNearestExact3DBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UpsampleNearestExact3DBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector output_size; + c10::optional scales_d; + c10::optional scales_h; + c10::optional scales_w; + +}; +#ifdef _WIN32 +struct SigmoidBackwardBackward0 : public TraceableFunction { + TORCH_API SigmoidBackwardBackward0() = default; +#else +struct TORCH_API SigmoidBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SigmoidBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + output_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + SavedVariable output_; + +}; +#ifdef _WIN32 +struct TanhBackwardBackward0 : public TraceableFunction { + TORCH_API TanhBackwardBackward0() = default; +#else +struct TORCH_API TanhBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TanhBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + output_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grad_output_; + SavedVariable output_; + +}; +#ifdef _WIN32 +struct CudnnCtcLossBackward0 : public TraceableFunction { + TORCH_API CudnnCtcLossBackward0() = default; +#else +struct TORCH_API CudnnCtcLossBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnCtcLossBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result0_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool zero_infinity; + SavedVariable result0_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct CudnnCtcLossBackward1 : public TraceableFunction { + TORCH_API CudnnCtcLossBackward1() = default; +#else +struct TORCH_API CudnnCtcLossBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnCtcLossBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result0_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool zero_infinity; + SavedVariable result0_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct CudnnConvolutionTransposeBackward0 : public TraceableFunction { + TORCH_API CudnnConvolutionTransposeBackward0() = default; +#else +struct TORCH_API CudnnConvolutionTransposeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "CudnnConvolutionTransposeBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + c10::SymInt groups; + std::vector output_padding; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MpsConvolutionTransposeBackward0 : public TraceableFunction { + TORCH_API MpsConvolutionTransposeBackward0() = default; +#else +struct TORCH_API MpsConvolutionTransposeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MpsConvolutionTransposeBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + c10::SymInt groups; + std::vector output_padding; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct CudnnConvolutionBackward0 : public TraceableFunction { + TORCH_API CudnnConvolutionBackward0() = default; +#else +struct TORCH_API CudnnConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dilation; + c10::SymInt groups; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct CudnnGridSamplerBackward0 : public TraceableFunction { + TORCH_API CudnnGridSamplerBackward0() = default; +#else +struct TORCH_API CudnnGridSamplerBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnGridSamplerBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grid_.reset_data(); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable grid_; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct CudnnAffineGridGeneratorBackward0 : public TraceableFunction { + TORCH_API CudnnAffineGridGeneratorBackward0() = default; +#else +struct TORCH_API CudnnAffineGridGeneratorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnAffineGridGeneratorBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t C = 0; + int64_t H = 0; + int64_t N = 0; + int64_t W = 0; + +}; +#ifdef _WIN32 +struct CudnnBatchNormBackward0 : public TraceableFunction { + TORCH_API CudnnBatchNormBackward0() = default; +#else +struct TORCH_API CudnnBatchNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "CudnnBatchNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + result3_.reset_data(); + } + bool retain_variables = true; + void will_release_variables() override { + retain_variables = false; + } + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double epsilon; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable running_var_; + bool training; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + SavedVariable result3_; + +}; +#ifdef _WIN32 +struct CudnnBatchNormBackwardBackward0 : public TraceableFunction { + TORCH_API CudnnBatchNormBackwardBackward0() = default; +#else +struct TORCH_API CudnnBatchNormBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnBatchNormBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + input_.reset_data(); + reserveSpace_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + save_mean_.reset_data(); + save_var_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double epsilon; + SavedVariable grad_output_; + SavedVariable input_; + SavedVariable reserveSpace_; + SavedVariable running_mean_; + SavedVariable running_var_; + SavedVariable save_mean_; + SavedVariable save_var_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct 
NnpackSpatialConvolutionBackward0 : public TraceableFunction { + TORCH_API NnpackSpatialConvolutionBackward0() = default; +#else +struct TORCH_API NnpackSpatialConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NnpackSpatialConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + SavedVariable input_; + std::vector padding; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct LstmMpsBackward0 : public TraceableFunction { + TORCH_API LstmMpsBackward0() = default; +#else +struct TORCH_API LstmMpsBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LstmMpsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + hx_.clear(); + hx_released_ = true; + input_.reset_data(); + params_.clear(); + params_released_ = true; + result3_.reset_data(); + result4_.reset_data(); + result5_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool batch_first; + bool bidirectional; + double dropout; + bool has_biases; + std::vector hx_; + bool hx_released_ = false; + SavedVariable input_; + int64_t num_layers = 0; + std::vector params_; + bool params_released_ = false; + bool train; + SavedVariable result3_; + SavedVariable result4_; + SavedVariable result5_; + size_t hx_size_; + size_t params_size_; +}; +#ifdef _WIN32 +struct 
CudnnRnnBackward0 : public TraceableFunction { + TORCH_API CudnnRnnBackward0() = default; +#else +struct TORCH_API CudnnRnnBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnRnnBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + cx_.reset_data(); + dropout_state_.reset_data(); + hx_.reset_data(); + input_.reset_data(); + weight_.clear(); + weight_released_ = true; + result0_.reset_data(); + result3_.reset_data(); + result4_.reset_data(); + } + bool retain_variables = true; + void will_release_variables() override { + retain_variables = false; + } + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool batch_first; + std::vector batch_sizes; + bool bidirectional; + SavedVariable cx_; + double dropout; + SavedVariable dropout_state_; + c10::SymInt hidden_size; + SavedVariable hx_; + SavedVariable input_; + int64_t mode = 0; + int64_t num_layers = 0; + c10::SymInt proj_size; + bool train; + std::vector weight_; + bool weight_released_ = false; + int64_t weight_stride0 = 0; + SavedVariable result0_; + SavedVariable result3_; + SavedVariable result4_; + size_t weight_size_; +}; +#ifdef _WIN32 +struct CudnnRnnBackwardBackward0 : public TraceableFunction { + TORCH_API CudnnRnnBackwardBackward0() = default; +#else +struct TORCH_API CudnnRnnBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "CudnnRnnBackwardBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t weight_size_; 
+}; +#ifdef _WIN32 +struct MiopenConvolutionTransposeBackward0 : public TraceableFunction { + TORCH_API MiopenConvolutionTransposeBackward0() = default; +#else +struct TORCH_API MiopenConvolutionTransposeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MiopenConvolutionTransposeBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + std::vector output_padding; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MiopenConvolutionBackward0 : public TraceableFunction { + TORCH_API MiopenConvolutionBackward0() = default; +#else +struct TORCH_API MiopenConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MiopenConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MiopenDepthwiseConvolutionBackward0 : public TraceableFunction { + TORCH_API MiopenDepthwiseConvolutionBackward0() = default; +#else +struct TORCH_API MiopenDepthwiseConvolutionBackward0 
: public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MiopenDepthwiseConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MiopenBatchNormBackward0 : public TraceableFunction { + TORCH_API MiopenBatchNormBackward0() = default; +#else +struct TORCH_API MiopenBatchNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MiopenBatchNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + weight_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double epsilon; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable running_var_; + bool training; + SavedVariable weight_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct MiopenBatchNormBackwardBackward0 : public TraceableFunction { + TORCH_API MiopenBatchNormBackwardBackward0() = default; +#else +struct TORCH_API MiopenBatchNormBackwardBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) 
override; + std::string name() const override { return "MiopenBatchNormBackwardBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + grad_output_.reset_data(); + input_.reset_data(); + running_mean_.reset_data(); + running_var_.reset_data(); + save_mean_.reset_data(); + save_var_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double epsilon; + SavedVariable grad_output_; + SavedVariable input_; + SavedVariable running_mean_; + SavedVariable running_var_; + SavedVariable save_mean_; + SavedVariable save_var_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MiopenRnnBackward0 : public TraceableFunction { + TORCH_API MiopenRnnBackward0() = default; +#else +struct TORCH_API MiopenRnnBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MiopenRnnBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + cx_.reset_data(); + dropout_state_.reset_data(); + hx_.reset_data(); + input_.reset_data(); + weight_.clear(); + weight_released_ = true; + result0_.reset_data(); + result3_.reset_data(); + result4_.reset_data(); + } + bool retain_variables = true; + void will_release_variables() override { + retain_variables = false; + } + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool batch_first; + std::vector batch_sizes; + bool bidirectional; + SavedVariable cx_; + double dropout; + SavedVariable dropout_state_; + int64_t hidden_size = 0; + SavedVariable hx_; + SavedVariable input_; + int64_t mode = 0; + int64_t num_layers = 0; + bool train; + std::vector weight_; + bool weight_released_ = false; + int64_t weight_stride0 
= 0; + SavedVariable result0_; + SavedVariable result3_; + SavedVariable result4_; + size_t weight_size_; +}; +#ifdef _WIN32 +struct MkldnnRnnLayerBackward0 : public TraceableFunction { + TORCH_API MkldnnRnnLayerBackward0() = default; +#else +struct TORCH_API MkldnnRnnLayerBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnRnnLayerBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + cx__.reset_data(); + hx__.reset_data(); + input_.reset_data(); + weight0_.reset_data(); + weight1_.reset_data(); + weight2_.reset_data(); + weight3_.reset_data(); + result0_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + result3_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool batch_first; + std::vector batch_sizes; + bool bidirectional; + SavedVariable cx__; + bool has_biases; + int64_t hidden_size = 0; + SavedVariable hx__; + SavedVariable input_; + int64_t mode = 0; + int64_t num_layers = 0; + bool reverse; + bool train; + SavedVariable weight0_; + SavedVariable weight1_; + SavedVariable weight2_; + SavedVariable weight3_; + SavedVariable result0_; + SavedVariable result1_; + SavedVariable result2_; + SavedVariable result3_; + +}; +#ifdef _WIN32 +struct MkldnnConvolutionBackward0 : public TraceableFunction { + TORCH_API MkldnnConvolutionBackward0() = default; +#else +struct TORCH_API MkldnnConvolutionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnConvolutionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + c10::OptionalArray bias_sym_sizes_opt; + std::vector dilation; + c10::SymInt groups; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MkldnnLinearBackward0 : public TraceableFunction { + TORCH_API MkldnnLinearBackward0() = default; +#else +struct TORCH_API MkldnnLinearBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnLinearBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + weight_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + SavedVariable weight_; + +}; +#ifdef _WIN32 +struct MkldnnMaxPool2DBackward0 : public TraceableFunction { + TORCH_API MkldnnMaxPool2DBackward0() = default; +#else +struct TORCH_API MkldnnMaxPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnMaxPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MkldnnMaxPool3DBackward0 : public TraceableFunction { + TORCH_API MkldnnMaxPool3DBackward0() = 
default; +#else +struct TORCH_API MkldnnMaxPool3DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnMaxPool3DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool ceil_mode; + std::vector dilation; + std::vector kernel_size; + std::vector padding; + SavedVariable self_; + std::vector stride; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct MkldnnAdaptiveAvgPool2DBackward0 : public TraceableFunction { + TORCH_API MkldnnAdaptiveAvgPool2DBackward0() = default; +#else +struct TORCH_API MkldnnAdaptiveAvgPool2DBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnAdaptiveAvgPool2DBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct MkldnnReshapeBackward0 : public TraceableFunction { + TORCH_API MkldnnReshapeBackward0() = default; +#else +struct TORCH_API MkldnnReshapeBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "MkldnnReshapeBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) 
override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct NestedTensorFromTensorListBackward0 : public TraceableFunction { + TORCH_API NestedTensorFromTensorListBackward0() = default; +#else +struct TORCH_API NestedTensorFromTensorListBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedTensorFromTensorListBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + list_.clear(); + list_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector list_; + bool list_released_ = false; + size_t list_size_; +}; +#ifdef _WIN32 +struct NestedTensorFromMaskBackward0 : public TraceableFunction { + TORCH_API NestedTensorFromMaskBackward0() = default; +#else +struct TORCH_API NestedTensorFromMaskBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedTensorFromMaskBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector t_sym_sizes; + +}; +#ifdef _WIN32 +struct NestedFromPaddedBackward0 : public TraceableFunction { + TORCH_API NestedFromPaddedBackward0() = default; +#else +struct TORCH_API NestedFromPaddedBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedFromPaddedBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + padded_.reset_data(); + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool fuse_transform_0213; + SavedVariable padded_; + +}; +#ifdef _WIN32 +struct ToPaddedTensorBackward0 : public TraceableFunction { + TORCH_API ToPaddedTensorBackward0() = default; +#else +struct TORCH_API ToPaddedTensorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ToPaddedTensorBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct NestedViewFromBufferBackward0 : public Node { + TORCH_API NestedViewFromBufferBackward0() = default; +#else +struct TORCH_API NestedViewFromBufferBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedViewFromBufferBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ScaledDotProductEfficientAttentionBackward0 : public TraceableFunction { + TORCH_API ScaledDotProductEfficientAttentionBackward0() = default; +#else +struct TORCH_API ScaledDotProductEfficientAttentionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScaledDotProductEfficientAttentionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + 
attn_bias_.reset_data(); + key_.reset_data(); + query_.reset_data(); + value_.reset_data(); + log_sumexp_.reset_data(); + output_.reset_data(); + philox_offset_.reset_data(); + philox_seed_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable attn_bias_; + double dropout_p; + bool is_causal; + SavedVariable key_; + SavedVariable query_; + c10::optional scale; + SavedVariable value_; + SavedVariable log_sumexp_; + SavedVariable output_; + SavedVariable philox_offset_; + SavedVariable philox_seed_; + +}; +#ifdef _WIN32 +struct ScaledDotProductFlashAttentionBackward0 : public TraceableFunction { + TORCH_API ScaledDotProductFlashAttentionBackward0() = default; +#else +struct TORCH_API ScaledDotProductFlashAttentionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScaledDotProductFlashAttentionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + key_.reset_data(); + query_.reset_data(); + value_.reset_data(); + cum_seq_k_.reset_data(); + cum_seq_q_.reset_data(); + logsumexp_.reset_data(); + output_.reset_data(); + philox_offset_.reset_data(); + philox_seed_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + double dropout_p; + bool is_causal; + SavedVariable key_; + SavedVariable query_; + c10::optional scale; + SavedVariable value_; + SavedVariable cum_seq_k_; + SavedVariable cum_seq_q_; + SavedVariable logsumexp_; + c10::SymInt max_k; + c10::SymInt max_q; + SavedVariable output_; + SavedVariable philox_offset_; + SavedVariable philox_seed_; + +}; +#ifdef _WIN32 +struct FlashAttentionBackward0 : public TraceableFunction { + TORCH_API 
FlashAttentionBackward0() = default; +#else +struct TORCH_API FlashAttentionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FlashAttentionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + cum_seq_k_.reset_data(); + cum_seq_q_.reset_data(); + key_.reset_data(); + query_.reset_data(); + value_.reset_data(); + output_.reset_data(); + philox_offset_.reset_data(); + philox_seed_.reset_data(); + softmax_logsumexp_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable cum_seq_k_; + SavedVariable cum_seq_q_; + double dropout_p; + bool is_causal; + SavedVariable key_; + c10::SymInt max_k; + c10::SymInt max_q; + SavedVariable query_; + c10::optional scale; + SavedVariable value_; + SavedVariable output_; + SavedVariable philox_offset_; + SavedVariable philox_seed_; + SavedVariable softmax_logsumexp_; + +}; +#ifdef _WIN32 +struct EfficientAttentionBackward0 : public TraceableFunction { + TORCH_API EfficientAttentionBackward0() = default; +#else +struct TORCH_API EfficientAttentionBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "EfficientAttentionBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + bias_.reset_data(); + cu_seqlens_k_.reset_data(); + cu_seqlens_q_.reset_data(); + key_.reset_data(); + query_.reset_data(); + value_.reset_data(); + logsumexp_.reset_data(); + output_.reset_data(); + philox_offset_.reset_data(); + philox_seed_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, 
SwapSavedVariables& saved) override; + SavedVariable bias_; + SavedVariable cu_seqlens_k_; + SavedVariable cu_seqlens_q_; + int64_t custom_mask_type = 0; + double dropout_p; + SavedVariable key_; + SavedVariable query_; + c10::optional scale; + SavedVariable value_; + SavedVariable logsumexp_; + c10::SymInt max_seqlen_batch_k; + c10::SymInt max_seqlen_batch_q; + SavedVariable output_; + SavedVariable philox_offset_; + SavedVariable philox_seed_; + +}; +#ifdef _WIN32 +struct FftR2CBackward0 : public TraceableFunction { + TORCH_API FftR2CBackward0() = default; +#else +struct TORCH_API FftR2CBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FftR2CBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + int64_t normalization = 0; + bool onesided; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct FftC2RBackward0 : public TraceableFunction { + TORCH_API FftC2RBackward0() = default; +#else +struct TORCH_API FftC2RBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FftC2RBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + int64_t normalization = 0; + +}; +#ifdef _WIN32 +struct FftC2CBackward0 : public TraceableFunction { + TORCH_API FftC2CBackward0() = default; +#else +struct TORCH_API FftC2CBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; 
+ variable_list apply(variable_list&& grads) override; + std::string name() const override { return "FftC2CBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + bool forward; + int64_t normalization = 0; + +}; +#ifdef _WIN32 +struct UnbindBackward0 : public Node { + TORCH_API UnbindBackward0() = default; +#else +struct TORCH_API UnbindBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnbindBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct UnbindBackwardAutogradNestedTensor0 : public Node { + TORCH_API UnbindBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API UnbindBackwardAutogradNestedTensor0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnbindBackwardAutogradNestedTensor0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + at::TensorOptions self_options; + +}; +#ifdef _WIN32 +struct StackBackward0 : public TraceableFunction { + TORCH_API StackBackward0() = default; +#else +struct TORCH_API StackBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "StackBackward0"; } + void 
release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + ::std::vector tensors_args_scalartypes; + size_t tensors_size_; +}; +#ifdef _WIN32 +struct ThnnFusedLstmCellBackward0 : public TraceableFunction { + TORCH_API ThnnFusedLstmCellBackward0() = default; +#else +struct TORCH_API ThnnFusedLstmCellBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ThnnFusedLstmCellBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + cx_.reset_data(); + hidden_bias_.reset_data(); + hidden_gates_.reset_data(); + input_bias_.reset_data(); + input_gates_.reset_data(); + result1_.reset_data(); + result2_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable cx_; + SavedVariable hidden_bias_; + SavedVariable hidden_gates_; + SavedVariable input_bias_; + SavedVariable input_gates_; + SavedVariable result1_; + SavedVariable result2_; + +}; +#ifdef _WIN32 +struct ThnnFusedGruCellBackward0 : public TraceableFunction { + TORCH_API ThnnFusedGruCellBackward0() = default; +#else +struct TORCH_API ThnnFusedGruCellBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ThnnFusedGruCellBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + hidden_bias_.reset_data(); + hidden_gates_.reset_data(); + hx_.reset_data(); + input_bias_.reset_data(); + input_gates_.reset_data(); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable hidden_bias_; + SavedVariable hidden_gates_; + SavedVariable hx_; + SavedVariable input_bias_; + SavedVariable input_gates_; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct PackPaddedSequenceBackward0 : public TraceableFunction { + TORCH_API PackPaddedSequenceBackward0() = default; +#else +struct TORCH_API PackPaddedSequenceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PackPaddedSequenceBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result1_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + bool batch_first; + std::vector input_sym_sizes; + SavedVariable result1_; + +}; +#ifdef _WIN32 +struct SegmentReduceBackward0 : public TraceableFunction { + TORCH_API SegmentReduceBackward0() = default; +#else +struct TORCH_API SegmentReduceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SegmentReduceBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + data_.reset_data(); + lengths_.reset_data(); + offsets_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t axis = 0; + SavedVariable data_; + c10::optional initial; + SavedVariable lengths_; + SavedVariable offsets_; + std::string reduce; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct PinMemoryBackward0 : public TraceableFunction { + TORCH_API PinMemoryBackward0() = default; +#else 
+struct TORCH_API PinMemoryBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PinMemoryBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct TestWarnInAutogradBackward0 : public TraceableFunction { + TORCH_API TestWarnInAutogradBackward0() = default; +#else +struct TORCH_API TestWarnInAutogradBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestWarnInAutogradBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchBackward0 : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchBackward0() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchBackwardAutogradNestedTensor0 : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchBackwardAutogradNestedTensor0() = default; +#else +struct TORCH_API 
TestAutogradMultipleDispatchBackwardAutogradNestedTensor0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchBackwardAutogradNestedTensor0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchBackwardAutogradCUDA0 : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchBackwardAutogradCUDA0() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchBackwardAutogradCUDA0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchBackwardAutogradCUDA0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchBackwardAutogradNestedTensor1 : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchBackwardAutogradNestedTensor1() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchBackwardAutogradNestedTensor1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchBackwardAutogradNestedTensor1"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct 
TestAutogradMultipleDispatchViewBackward0 : public Node { + TORCH_API TestAutogradMultipleDispatchViewBackward0() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchViewBackward0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchViewBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchViewBackwardAutogradCUDA0 : public Node { + TORCH_API TestAutogradMultipleDispatchViewBackwardAutogradCUDA0() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchViewBackwardAutogradCUDA0 : public Node { +#endif + using Node::Node; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchViewBackwardAutogradCUDA0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ScatterReduceBackward0 : public TraceableFunction { + TORCH_API ScatterReduceBackward0() = default; +#else +struct TORCH_API ScatterReduceBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ScatterReduceBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + index_.reset_data(); + self_.reset_data(); + src_.reset_data(); + result_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + bool include_self; + SavedVariable index_; + std::string reduce; + SavedVariable self_; + SavedVariable src_; + SavedVariable result_; + +}; +#ifdef _WIN32 +struct ReshapeCopyBackward0 : public TraceableFunction { + TORCH_API ReshapeCopyBackward0() = default; +#else +struct TORCH_API ReshapeCopyBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReshapeCopyBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct ForeachDivBackward0 : public TraceableFunction { + TORCH_API ForeachDivBackward0() = default; +#else +struct TORCH_API ForeachDivBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachDivBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachPowBackward0 : public TraceableFunction { + TORCH_API ForeachPowBackward0() = default; +#else +struct TORCH_API ForeachPowBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list 
apply(variable_list&& grads) override; + std::string name() const override { return "ForeachPowBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent_.clear(); + exponent_released_ = true; + self_.clear(); + self_released_ = true; + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector exponent_; + bool exponent_released_ = false; + std::vector self_; + bool self_released_ = false; + std::vector result_; + bool result_released_ = false; + size_t self_size_; + size_t exponent_size_; +}; +#ifdef _WIN32 +struct ForeachPowBackward1 : public TraceableFunction { + TORCH_API ForeachPowBackward1() = default; +#else +struct TORCH_API ForeachPowBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachPowBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector exponent; + bool exponent_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachPowBackward2 : public TraceableFunction { + TORCH_API ForeachPowBackward2() = default; +#else +struct TORCH_API ForeachPowBackward2 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachPowBackward2"; } + void release_variables() override { + std::lock_guard lock(mutex_); + exponent_.clear(); + exponent_released_ = true; + 
result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector exponent_; + bool exponent_released_ = false; + at::Scalar self; + std::vector result_; + bool result_released_ = false; + size_t exponent_size_; +}; +#ifdef _WIN32 +struct ForeachMinimumBackward0 : public TraceableFunction { + TORCH_API ForeachMinimumBackward0() = default; +#else +struct TORCH_API ForeachMinimumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMinimumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMinimumBackward1 : public TraceableFunction { + TORCH_API ForeachMinimumBackward1() = default; +#else +struct TORCH_API ForeachMinimumBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMinimumBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMaximumBackward0 : public 
TraceableFunction { + TORCH_API ForeachMaximumBackward0() = default; +#else +struct TORCH_API ForeachMaximumBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMaximumBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMaximumBackward1 : public TraceableFunction { + TORCH_API ForeachMaximumBackward1() = default; +#else +struct TORCH_API ForeachMaximumBackward1 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMaximumBackward1"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachNormBackward0 : public TraceableFunction { + TORCH_API ForeachNormBackward0() = default; +#else +struct TORCH_API ForeachNormBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachNormBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + 
result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar ord; + std::vector self_; + bool self_released_ = false; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct AliasBackward0_copy : public TraceableFunction { + TORCH_API AliasBackward0_copy() = default; +#else +struct TORCH_API AliasBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AliasBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct AsStridedBackward0_copy : public TraceableFunction { + TORCH_API AsStridedBackward0_copy() = default; +#else +struct TORCH_API AsStridedBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "AsStridedBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::TensorGeometry self_geometry; + std::vector size; + c10::optional storage_offset; + std::vector stride; + +}; +#ifdef _WIN32 +struct ConjBackward0_copy : public TraceableFunction { + TORCH_API ConjBackward0_copy() = default; +#else +struct TORCH_API ConjBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"ConjBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct NegViewBackward0_copy : public TraceableFunction { + TORCH_API NegViewBackward0_copy() = default; +#else +struct TORCH_API NegViewBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NegViewBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct DiagonalBackward0_copy : public TraceableFunction { + TORCH_API DiagonalBackward0_copy() = default; +#else +struct TORCH_API DiagonalBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "DiagonalBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim1 = 0; + int64_t dim2 = 0; + int64_t offset = 0; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct ExpandBackward0_copy : public TraceableFunction { + TORCH_API ExpandBackward0_copy() = default; +#else +struct TORCH_API ExpandBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ExpandBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct PermuteBackward0_copy : public TraceableFunction { + TORCH_API PermuteBackward0_copy() = default; +#else +struct TORCH_API PermuteBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "PermuteBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dims; + +}; +#ifdef _WIN32 +struct ReshapeAliasBackward0_copy : public TraceableFunction { + TORCH_API ReshapeAliasBackward0_copy() = default; +#else +struct TORCH_API ReshapeAliasBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ReshapeAliasBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SelectBackward0_copy : public TraceableFunction { + TORCH_API SelectBackward0_copy() = default; +#else +struct TORCH_API SelectBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SelectBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + std::vector 
self_sym_sizes; + +}; +#ifdef _WIN32 +struct SelectBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API SelectBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API SelectBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SelectBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::SymInt index; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct SliceBackward0_copy : public TraceableFunction { + TORCH_API SliceBackward0_copy() = default; +#else +struct TORCH_API SliceBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SliceBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + c10::optional end; + std::vector self_sym_sizes; + c10::optional start; + c10::SymInt step; + +}; +#ifdef _WIN32 +struct SplitBackward0_copy : public TraceableFunction { + TORCH_API SplitBackward0_copy() = default; +#else +struct TORCH_API SplitBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + c10::SymInt split_size; + +}; +#ifdef _WIN32 +struct SplitWithSizesBackward0_copy : public TraceableFunction { + TORCH_API SplitWithSizesBackward0_copy() = default; +#else +struct TORCH_API SplitWithSizesBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitWithSizesBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + at::TensorOptions self_options; + std::vector self_sym_sizes; + std::vector split_sizes; + +}; +#ifdef _WIN32 +struct SplitWithSizesBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API SplitWithSizesBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API SplitWithSizesBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SplitWithSizesBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + at::TensorOptions self_options; + std::vector split_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackward0_copy : public TraceableFunction { + TORCH_API SqueezeBackward0_copy() = default; +#else +struct TORCH_API SqueezeBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + 
variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackward1_copy : public TraceableFunction { + TORCH_API SqueezeBackward1_copy() = default; +#else +struct TORCH_API SqueezeBackward1_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackward1_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API SqueezeBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API SqueezeBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct SqueezeBackward2_copy : public TraceableFunction { + TORCH_API SqueezeBackward2_copy() = default; +#else +struct TORCH_API SqueezeBackward2_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + 
std::string name() const override { return "SqueezeBackward2_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct SqueezeBackwardAutogradNestedTensor1_copy : public TraceableFunction { + TORCH_API SqueezeBackwardAutogradNestedTensor1_copy() = default; +#else +struct TORCH_API SqueezeBackwardAutogradNestedTensor1_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "SqueezeBackwardAutogradNestedTensor1_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector dim; + int64_t self_dim = 0; + +}; +#ifdef _WIN32 +struct TBackward0_copy : public TraceableFunction { + TORCH_API TBackward0_copy() = default; +#else +struct TORCH_API TBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct TransposeBackward0_copy : public TraceableFunction { + TORCH_API TransposeBackward0_copy() = default; +#else +struct TORCH_API TransposeBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TransposeBackward0_copy"; } + void 
release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim0 = 0; + int64_t dim1 = 0; + +}; +#ifdef _WIN32 +struct UnfoldBackward0_copy : public TraceableFunction { + TORCH_API UnfoldBackward0_copy() = default; +#else +struct TORCH_API UnfoldBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnfoldBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dimension = 0; + std::vector self_sym_sizes; + int64_t size = 0; + int64_t step = 0; + +}; +#ifdef _WIN32 +struct LiftFreshBackward0_copy : public TraceableFunction { + TORCH_API LiftFreshBackward0_copy() = default; +#else +struct TORCH_API LiftFreshBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "LiftFreshBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UnsqueezeBackward0_copy : public TraceableFunction { + TORCH_API UnsqueezeBackward0_copy() = default; +#else +struct TORCH_API UnsqueezeBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnsqueezeBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct ViewBackward0_copy : public TraceableFunction { + TORCH_API ViewBackward0_copy() = default; +#else +struct TORCH_API ViewBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct ViewBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API ViewBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API ViewBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ViewAsRealBackward0_copy : public TraceableFunction { + TORCH_API ViewAsRealBackward0_copy() = default; +#else +struct TORCH_API ViewAsRealBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewAsRealBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ViewAsComplexBackward0_copy : public TraceableFunction { + TORCH_API ViewAsComplexBackward0_copy() = default; +#else +struct TORCH_API ViewAsComplexBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ViewAsComplexBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct ValuesBackward0_copy : public TraceableFunction { + TORCH_API ValuesBackward0_copy() = default; +#else +struct TORCH_API ValuesBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ValuesBackward0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + std::vector self_sym_sizes; + +}; +#ifdef _WIN32 +struct ValuesBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API ValuesBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API ValuesBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ValuesBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list 
apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct NestedViewFromBufferBackward0_copy : public TraceableFunction { + TORCH_API NestedViewFromBufferBackward0_copy() = default; +#else +struct TORCH_API NestedViewFromBufferBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "NestedViewFromBufferBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + +}; +#ifdef _WIN32 +struct UnbindBackward0_copy : public TraceableFunction { + TORCH_API UnbindBackward0_copy() = default; +#else +struct TORCH_API UnbindBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnbindBackward0_copy"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + +}; +#ifdef _WIN32 +struct UnbindBackwardAutogradNestedTensor0_copy : public TraceableFunction { + TORCH_API UnbindBackwardAutogradNestedTensor0_copy() = default; +#else +struct TORCH_API UnbindBackwardAutogradNestedTensor0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "UnbindBackwardAutogradNestedTensor0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + int64_t dim = 0; + SavedVariable self_; + at::TensorOptions self_options; + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchViewBackward0_copy : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchViewBackward0_copy() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchViewBackward0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchViewBackward0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy : public TraceableFunction { + TORCH_API TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy() = default; +#else +struct TORCH_API TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.reset_data(); + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable self_; + +}; +#ifdef _WIN32 +struct ForeachAbsBackward0 : public TraceableFunction { + TORCH_API ForeachAbsBackward0() = default; +#else +struct TORCH_API ForeachAbsBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) 
override; + std::string name() const override { return "ForeachAbsBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAcosBackward0 : public TraceableFunction { + TORCH_API ForeachAcosBackward0() = default; +#else +struct TORCH_API ForeachAcosBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAcosBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAddBackward1Scalar : public TraceableFunction { + TORCH_API ForeachAddBackward1Scalar() = default; +#else +struct TORCH_API ForeachAddBackward1Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddBackward1Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAddBackward0List : public TraceableFunction { + TORCH_API 
ForeachAddBackward0List() = default; +#else +struct TORCH_API ForeachAddBackward0List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddBackward0List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachAddBackward1ScalarList : public TraceableFunction { + TORCH_API ForeachAddBackward1ScalarList() = default; +#else +struct TORCH_API ForeachAddBackward1ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddBackward1ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAddBackward0Tensor : public TraceableFunction { + TORCH_API ForeachAddBackward0Tensor() = default; +#else +struct TORCH_API ForeachAddBackward0Tensor : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddBackward0Tensor"; } + void release_variables() override { + std::lock_guard 
lock(mutex_); + other_.reset_data(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + SavedVariable other_; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAddcdivBackward0Scalar : public TraceableFunction { + TORCH_API ForeachAddcdivBackward0Scalar() = default; +#else +struct TORCH_API ForeachAddcdivBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddcdivBackward0Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + tensor1_.clear(); + tensor1_released_ = true; + tensor2_.clear(); + tensor2_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + std::vector tensor1_; + bool tensor1_released_ = false; + std::vector tensor2_; + bool tensor2_released_ = false; + at::Scalar value; + size_t self_size_; + size_t tensor1_size_; + size_t tensor2_size_; +}; +#ifdef _WIN32 +struct ForeachAddcdivBackward0ScalarList : public TraceableFunction { + TORCH_API ForeachAddcdivBackward0ScalarList() = default; +#else +struct TORCH_API ForeachAddcdivBackward0ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddcdivBackward0ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + tensor1_.clear(); + tensor1_released_ = 
true; + tensor2_.clear(); + tensor2_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + std::vector tensor1_; + bool tensor1_released_ = false; + std::vector tensor2_; + bool tensor2_released_ = false; + size_t self_size_; + size_t tensor1_size_; + size_t tensor2_size_; +}; +#ifdef _WIN32 +struct ForeachAddcmulBackward0Scalar : public TraceableFunction { + TORCH_API ForeachAddcmulBackward0Scalar() = default; +#else +struct TORCH_API ForeachAddcmulBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddcmulBackward0Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + tensor1_.clear(); + tensor1_released_ = true; + tensor2_.clear(); + tensor2_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + std::vector tensor1_; + bool tensor1_released_ = false; + std::vector tensor2_; + bool tensor2_released_ = false; + at::Scalar value; + size_t self_size_; + size_t tensor1_size_; + size_t tensor2_size_; +}; +#ifdef _WIN32 +struct ForeachAddcmulBackward0ScalarList : public TraceableFunction { + TORCH_API ForeachAddcmulBackward0ScalarList() = default; +#else +struct TORCH_API ForeachAddcmulBackward0ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAddcmulBackward0ScalarList"; } + void 
release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + tensor1_.clear(); + tensor1_released_ = true; + tensor2_.clear(); + tensor2_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + std::vector tensor1_; + bool tensor1_released_ = false; + std::vector tensor2_; + bool tensor2_released_ = false; + size_t self_size_; + size_t tensor1_size_; + size_t tensor2_size_; +}; +#ifdef _WIN32 +struct ForeachAsinBackward0 : public TraceableFunction { + TORCH_API ForeachAsinBackward0() = default; +#else +struct TORCH_API ForeachAsinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAsinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachAtanBackward0 : public TraceableFunction { + TORCH_API ForeachAtanBackward0() = default; +#else +struct TORCH_API ForeachAtanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachAtanBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const 
variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachCeilBackward0 : public TraceableFunction { + TORCH_API ForeachCeilBackward0() = default; +#else +struct TORCH_API ForeachCeilBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachCeilBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachClampMaxBackward0Scalar : public TraceableFunction { + TORCH_API ForeachClampMaxBackward0Scalar() = default; +#else +struct TORCH_API ForeachClampMaxBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMaxBackward0Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachClampMaxBackward1List : public TraceableFunction { + TORCH_API ForeachClampMaxBackward1List() = default; +#else +struct TORCH_API ForeachClampMaxBackward1List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMaxBackward1List"; } + void release_variables() override { + std::lock_guard lock(mutex_); 
+ other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachClampMaxBackward0ScalarList : public TraceableFunction { + TORCH_API ForeachClampMaxBackward0ScalarList() = default; +#else +struct TORCH_API ForeachClampMaxBackward0ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMaxBackward0ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachClampMinBackward0Scalar : public TraceableFunction { + TORCH_API ForeachClampMinBackward0Scalar() = default; +#else +struct TORCH_API ForeachClampMinBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMinBackward0Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool 
self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachClampMinBackward1List : public TraceableFunction { + TORCH_API ForeachClampMinBackward1List() = default; +#else +struct TORCH_API ForeachClampMinBackward1List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMinBackward1List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachClampMinBackward0ScalarList : public TraceableFunction { + TORCH_API ForeachClampMinBackward0ScalarList() = default; +#else +struct TORCH_API ForeachClampMinBackward0ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachClampMinBackward0ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachCosBackward0 : public TraceableFunction { + TORCH_API ForeachCosBackward0() = default; +#else +struct TORCH_API ForeachCosBackward0 : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachCosBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachCoshBackward0 : public TraceableFunction { + TORCH_API ForeachCoshBackward0() = default; +#else +struct TORCH_API ForeachCoshBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachCoshBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachDivBackward1Scalar : public TraceableFunction { + TORCH_API ForeachDivBackward1Scalar() = default; +#else +struct TORCH_API ForeachDivBackward1Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachDivBackward1Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; 
+#ifdef _WIN32 +struct ForeachDivBackward1ScalarList : public TraceableFunction { + TORCH_API ForeachDivBackward1ScalarList() = default; +#else +struct TORCH_API ForeachDivBackward1ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachDivBackward1ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachDivBackward0Tensor : public TraceableFunction { + TORCH_API ForeachDivBackward0Tensor() = default; +#else +struct TORCH_API ForeachDivBackward0Tensor : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachDivBackward0Tensor"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + SavedVariable other_; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachErfBackward0 : public TraceableFunction { + TORCH_API ForeachErfBackward0() = default; +#else +struct TORCH_API ForeachErfBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachErfBackward0"; } + 
void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachErfcBackward0 : public TraceableFunction { + TORCH_API ForeachErfcBackward0() = default; +#else +struct TORCH_API ForeachErfcBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachErfcBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachExpBackward0 : public TraceableFunction { + TORCH_API ForeachExpBackward0() = default; +#else +struct TORCH_API ForeachExpBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachExpBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachExpm1Backward0 : public TraceableFunction { + TORCH_API ForeachExpm1Backward0() = default; +#else +struct TORCH_API ForeachExpm1Backward0 : public TraceableFunction { 
+#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachExpm1Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachFloorBackward0 : public TraceableFunction { + TORCH_API ForeachFloorBackward0() = default; +#else +struct TORCH_API ForeachFloorBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachFloorBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachFracBackward0 : public TraceableFunction { + TORCH_API ForeachFracBackward0() = default; +#else +struct TORCH_API ForeachFracBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachFracBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachLerpBackward1List : public TraceableFunction { + TORCH_API ForeachLerpBackward1List() = default; +#else +struct TORCH_API ForeachLerpBackward1List : public TraceableFunction { +#endif + using 
TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLerpBackward1List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + tensors1_.clear(); + tensors1_released_ = true; + weights_.clear(); + weights_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + std::vector tensors1_; + bool tensors1_released_ = false; + std::vector weights_; + bool weights_released_ = false; + size_t self_size_; + size_t tensors1_size_; + size_t weights_size_; +}; +#ifdef _WIN32 +struct ForeachLerpBackward0Scalar : public TraceableFunction { + TORCH_API ForeachLerpBackward0Scalar() = default; +#else +struct TORCH_API ForeachLerpBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLerpBackward0Scalar"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar weight; + size_t self_size_; + size_t tensors1_size_; +}; +#ifdef _WIN32 +struct ForeachLgammaBackward0 : public TraceableFunction { + TORCH_API ForeachLgammaBackward0() = default; +#else +struct TORCH_API ForeachLgammaBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLgammaBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) 
override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachLogBackward0 : public TraceableFunction { + TORCH_API ForeachLogBackward0() = default; +#else +struct TORCH_API ForeachLogBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLogBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachLog10Backward0 : public TraceableFunction { + TORCH_API ForeachLog10Backward0() = default; +#else +struct TORCH_API ForeachLog10Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLog10Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachLog1PBackward0 : public TraceableFunction { + TORCH_API ForeachLog1PBackward0() = default; +#else +struct TORCH_API ForeachLog1PBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return 
"ForeachLog1PBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachLog2Backward0 : public TraceableFunction { + TORCH_API ForeachLog2Backward0() = default; +#else +struct TORCH_API ForeachLog2Backward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachLog2Backward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMaximumBackward0List : public TraceableFunction { + TORCH_API ForeachMaximumBackward0List() = default; +#else +struct TORCH_API ForeachMaximumBackward0List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMaximumBackward0List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct 
ForeachMinimumBackward0List : public TraceableFunction { + TORCH_API ForeachMinimumBackward0List() = default; +#else +struct TORCH_API ForeachMinimumBackward0List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMinimumBackward0List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachMulBackward1Scalar : public TraceableFunction { + TORCH_API ForeachMulBackward1Scalar() = default; +#else +struct TORCH_API ForeachMulBackward1Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMulBackward1Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar scalar; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMulBackward0List : public TraceableFunction { + TORCH_API ForeachMulBackward0List() = default; +#else +struct TORCH_API ForeachMulBackward0List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMulBackward0List"; } + 
void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct ForeachMulBackward1ScalarList : public TraceableFunction { + TORCH_API ForeachMulBackward1ScalarList() = default; +#else +struct TORCH_API ForeachMulBackward1ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMulBackward1ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + scalars.clear(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector scalars; + bool scalars_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachMulBackward0Tensor : public TraceableFunction { + TORCH_API ForeachMulBackward0Tensor() = default; +#else +struct TORCH_API ForeachMulBackward0Tensor : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachMulBackward0Tensor"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.reset_data(); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + 
SavedVariable other_; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachNegBackward0 : public TraceableFunction { + TORCH_API ForeachNegBackward0() = default; +#else +struct TORCH_API ForeachNegBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachNegBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachPowBackward0Scalar : public TraceableFunction { + TORCH_API ForeachPowBackward0Scalar() = default; +#else +struct TORCH_API ForeachPowBackward0Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachPowBackward0Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar exponent; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachReciprocalBackward0 : public TraceableFunction { + TORCH_API ForeachReciprocalBackward0() = default; +#else +struct TORCH_API ForeachReciprocalBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachReciprocalBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void 
compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachRoundBackward0 : public TraceableFunction { + TORCH_API ForeachRoundBackward0() = default; +#else +struct TORCH_API ForeachRoundBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachRoundBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSigmoidBackward0 : public TraceableFunction { + TORCH_API ForeachSigmoidBackward0() = default; +#else +struct TORCH_API ForeachSigmoidBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSigmoidBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSignBackward0 : public TraceableFunction { + TORCH_API ForeachSignBackward0() = default; +#else +struct TORCH_API ForeachSignBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSignBackward0"; } + void release_variables() override { + + + } + 
+ void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSinBackward0 : public TraceableFunction { + TORCH_API ForeachSinBackward0() = default; +#else +struct TORCH_API ForeachSinBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSinBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSinhBackward0 : public TraceableFunction { + TORCH_API ForeachSinhBackward0() = default; +#else +struct TORCH_API ForeachSinhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSinhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSqrtBackward0 : public TraceableFunction { + TORCH_API ForeachSqrtBackward0() = default; +#else +struct TORCH_API ForeachSqrtBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSqrtBackward0"; } + 
void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSubBackward1Scalar : public TraceableFunction { + TORCH_API ForeachSubBackward1Scalar() = default; +#else +struct TORCH_API ForeachSubBackward1Scalar : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSubBackward1Scalar"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachSubBackward0List : public TraceableFunction { + TORCH_API ForeachSubBackward0List() = default; +#else +struct TORCH_API ForeachSubBackward0List : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSubBackward0List"; } + void release_variables() override { + std::lock_guard lock(mutex_); + other_.clear(); + other_released_ = true; + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + at::Scalar alpha; + std::vector other_; + bool other_released_ = false; + std::vector self_; + bool self_released_ = false; + size_t self_size_; + size_t other_size_; +}; +#ifdef _WIN32 +struct 
ForeachSubBackward1ScalarList : public TraceableFunction { + TORCH_API ForeachSubBackward1ScalarList() = default; +#else +struct TORCH_API ForeachSubBackward1ScalarList : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachSubBackward1ScalarList"; } + void release_variables() override { + std::lock_guard lock(mutex_); + self_.clear(); + self_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector self_; + bool self_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachTanBackward0 : public TraceableFunction { + TORCH_API ForeachTanBackward0() = default; +#else +struct TORCH_API ForeachTanBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachTanBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachTanhBackward0 : public TraceableFunction { + TORCH_API ForeachTanhBackward0() = default; +#else +struct TORCH_API ForeachTanhBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachTanhBackward0"; } + void release_variables() override { + std::lock_guard lock(mutex_); + result_.clear(); + result_released_ = true; + } + + void compiled_args(CompiledNodeArgs& 
args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + std::vector result_; + bool result_released_ = false; + size_t self_size_; +}; +#ifdef _WIN32 +struct ForeachTruncBackward0 : public TraceableFunction { + TORCH_API ForeachTruncBackward0() = default; +#else +struct TORCH_API ForeachTruncBackward0 : public TraceableFunction { +#endif + using TraceableFunction::TraceableFunction; + variable_list apply(variable_list&& grads) override; + std::string name() const override { return "ForeachTruncBackward0"; } + void release_variables() override { + + + } + + void compiled_args(CompiledNodeArgs& args) override; + variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override; + + size_t self_size_; +}; + +}}} // namespace torch::autograd::generated diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h new file mode 100644 index 0000000000000000000000000000000000000000..66ec17fa8c0abfb3efe1f7f73355afd5f6df6e03 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/VariableType.h @@ -0,0 +1,58 @@ +#pragma once + +// @generated from ../tools/autograd/templates/VariableType.h + +#include +#include + +#include + +#include +#include + +#include // for size_t +#include // for function +#include // for unique_ptr +#include +#include + +namespace at { + struct Quantizer; +}; + +namespace torch { namespace autograd { + +using Variable = at::Tensor; +using at::Context; +using at::Device; +using at::Dimname; +using at::DimnameList; +using at::Generator; +using at::IntArrayRef; +using at::MemoryFormat; +using at::QScheme; +using at::Scalar; +using at::ScalarType; +using at::Storage; +using at::Tensor; +using at::TensorList; +using at::TensorOptions; +using at::Quantizer; +// This 
is temporary typedef to enable Quantizer in aten native function API +// we'll remove them when we are actually exposing Quantizer class +// to frontend +using ConstQuantizerPtr = const c10::intrusive_ptr&; +using c10::optional; + +namespace VariableType { + TORCH_API std::vector allCUDATypes(); + TORCH_API std::vector allXPUTypes(); + TORCH_API std::vector allCPUTypes(); + + at::Tensor & unpack(Tensor & t, const char * name, int pos); + const at::Tensor & unpack(const Tensor & t, const char * name, int pos); + at::Tensor unpack_opt(const Tensor & t, const char * name, int pos); + std::vector unpack(const at::ITensorListRef& tl, const char *name, int pos); +}; + +}} // namespace torch::autograd diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..90e4910e96a347215d587d879bdcee0f52ffbb9f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +// @generated from ../tools/autograd/templates/python_functions.h + +// Python bindings for automatically generated autograd functions + +namespace torch { namespace autograd { namespace generated { + +void initialize_autogenerated_functions_0(PyObject* module); +void initialize_autogenerated_functions_1(PyObject* module); +void initialize_autogenerated_functions_2(PyObject* module); +void initialize_autogenerated_functions_3(PyObject* module); +void initialize_autogenerated_functions_4(PyObject* module); + +inline void initialize_autogenerated_functions(PyObject* module) { + initialize_autogenerated_functions_0(module); + initialize_autogenerated_functions_1(module); + initialize_autogenerated_functions_2(module); + initialize_autogenerated_functions_3(module); + 
initialize_autogenerated_functions_4(module); +} + +}}} // namespace torch::autograd::generated diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h new file mode 100644 index 0000000000000000000000000000000000000000..8d3a5934d6c5fcf79d295837365a2b9326dbde12 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_return_types.h @@ -0,0 +1,96 @@ +#pragma once + +namespace torch { +namespace autograd { +namespace generated { + +PyTypeObject* get__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_namedtuple(); +PyTypeObject* get__fused_moving_avg_obs_fq_helper_namedtuple(); +PyTypeObject* get__linalg_det_namedtuple(); +PyTypeObject* get__linalg_det_out_namedtuple(); +PyTypeObject* get__linalg_eigh_namedtuple(); +PyTypeObject* get__linalg_eigh_out_namedtuple(); +PyTypeObject* get__linalg_slogdet_namedtuple(); +PyTypeObject* get__linalg_slogdet_out_namedtuple(); +PyTypeObject* get__linalg_solve_ex_namedtuple(); +PyTypeObject* get__linalg_solve_ex_out_namedtuple(); +PyTypeObject* get__linalg_svd_namedtuple(); +PyTypeObject* get__linalg_svd_out_namedtuple(); +PyTypeObject* get__lu_with_info_namedtuple(); +PyTypeObject* get__scaled_dot_product_efficient_attention_namedtuple(); +PyTypeObject* get__scaled_dot_product_flash_attention_namedtuple(); +PyTypeObject* get__unpack_dual_namedtuple(); +PyTypeObject* get_aminmax_namedtuple(); +PyTypeObject* get_aminmax_out_namedtuple(); +PyTypeObject* get_cummax_namedtuple(); +PyTypeObject* get_cummax_out_namedtuple(); +PyTypeObject* get_cummin_namedtuple(); +PyTypeObject* get_cummin_out_namedtuple(); +PyTypeObject* get_frexp_namedtuple(); +PyTypeObject* get_frexp_out_namedtuple(); +PyTypeObject* get_geqrf_out_namedtuple(); +PyTypeObject* get_geqrf_namedtuple(); +PyTypeObject* 
get_histogram_out_namedtuple(); +PyTypeObject* get_histogram_namedtuple(); +PyTypeObject* get_histogramdd_namedtuple(); +PyTypeObject* get_kthvalue_namedtuple(); +PyTypeObject* get_kthvalue_out_namedtuple(); +PyTypeObject* get_linalg_cholesky_ex_namedtuple(); +PyTypeObject* get_linalg_cholesky_ex_out_namedtuple(); +PyTypeObject* get_linalg_eig_namedtuple(); +PyTypeObject* get_linalg_eig_out_namedtuple(); +PyTypeObject* get_linalg_eigh_namedtuple(); +PyTypeObject* get_linalg_eigh_out_namedtuple(); +PyTypeObject* get_linalg_inv_ex_namedtuple(); +PyTypeObject* get_linalg_inv_ex_out_namedtuple(); +PyTypeObject* get_linalg_ldl_factor_namedtuple(); +PyTypeObject* get_linalg_ldl_factor_out_namedtuple(); +PyTypeObject* get_linalg_ldl_factor_ex_namedtuple(); +PyTypeObject* get_linalg_ldl_factor_ex_out_namedtuple(); +PyTypeObject* get_linalg_lstsq_namedtuple(); +PyTypeObject* get_linalg_lstsq_out_namedtuple(); +PyTypeObject* get_linalg_lu_namedtuple(); +PyTypeObject* get_linalg_lu_out_namedtuple(); +PyTypeObject* get_linalg_lu_factor_namedtuple(); +PyTypeObject* get_linalg_lu_factor_out_namedtuple(); +PyTypeObject* get_linalg_lu_factor_ex_namedtuple(); +PyTypeObject* get_linalg_lu_factor_ex_out_namedtuple(); +PyTypeObject* get_linalg_qr_namedtuple(); +PyTypeObject* get_linalg_qr_out_namedtuple(); +PyTypeObject* get_linalg_slogdet_namedtuple(); +PyTypeObject* get_linalg_slogdet_out_namedtuple(); +PyTypeObject* get_linalg_solve_ex_namedtuple(); +PyTypeObject* get_linalg_solve_ex_out_namedtuple(); +PyTypeObject* get_linalg_svd_namedtuple(); +PyTypeObject* get_linalg_svd_out_namedtuple(); +PyTypeObject* get_lu_unpack_namedtuple(); +PyTypeObject* get_lu_unpack_out_namedtuple(); +PyTypeObject* get_max_namedtuple(); +PyTypeObject* get_max_out_namedtuple(); +PyTypeObject* get_median_namedtuple(); +PyTypeObject* get_median_out_namedtuple(); +PyTypeObject* get_min_namedtuple(); +PyTypeObject* get_min_out_namedtuple(); +PyTypeObject* get_mode_namedtuple(); +PyTypeObject* 
get_mode_out_namedtuple(); +PyTypeObject* get_nanmedian_namedtuple(); +PyTypeObject* get_nanmedian_out_namedtuple(); +PyTypeObject* get_qr_out_namedtuple(); +PyTypeObject* get_qr_namedtuple(); +PyTypeObject* get_slogdet_namedtuple(); +PyTypeObject* get_slogdet_out_namedtuple(); +PyTypeObject* get_sort_out_namedtuple(); +PyTypeObject* get_sort_namedtuple(); +PyTypeObject* get_svd_out_namedtuple(); +PyTypeObject* get_svd_namedtuple(); +PyTypeObject* get_topk_out_namedtuple(); +PyTypeObject* get_topk_namedtuple(); +PyTypeObject* get_triangular_solve_out_namedtuple(); +PyTypeObject* get_triangular_solve_namedtuple(); + +} + +void initReturnTypes(PyObject* module); + +} // namespace autograd +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h new file mode 100644 index 0000000000000000000000000000000000000000..da655883f3f69db316a23362de485ba412d212b5 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/error_messages.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace torch { +namespace autograd { +namespace utils { + +inline std::string requires_grad_leaf_error(bool requires_grad) { + std::ostringstream oss; + oss << "you can only change requires_grad flags of leaf variables."; + if (requires_grad == false) { + oss << " If you want to use a computed variable in a subgraph " + "that doesn't require differentiation use " + "var_no_grad = var.detach()."; + } + return oss.str(); +} + +} // namespace utils +} // namespace autograd +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h new file mode 100644 index 
0000000000000000000000000000000000000000..37dda0f9acaacde7f219e0200e5d6f83d23b29bc --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/grad_layout_contract.h @@ -0,0 +1,79 @@ +#pragma once + +#include + +namespace torch { +namespace autograd { +namespace utils { + +// Helper functions to enforce the "Gradient Layout Contract" described in +// torch/csrc/autograd/functions/accumulate_grad.h. + +// Checks if grad obeys the contract with variable. +inline bool obeys_layout_contract( + const at::Tensor& grad, + const at::Tensor& variable) { + TORCH_INTERNAL_ASSERT(!grad.is_sparse()); + TORCH_INTERNAL_ASSERT(!grad.is_sparse_csr()); + TORCH_INTERNAL_ASSERT(!variable.is_sparse_csr()); + + if (variable.is_nested()) { + // TODO: Nested Tensor does not have an implementation of detach. The + // current implementation of nested tensor likely does obey the gradient + // contract and should return true, but this would likely change in the + // future + return false; + } else if (variable.is_sparse()) { + // Gradient Layout Contract is not applicable for sparse layouts + return false; + } else if (variable.is_non_overlapping_and_dense()) { + // Only look at stride for dimensions that are not of size 1. + const auto& grad_sizes = grad.sym_sizes(); + const auto& grad_strides = grad.sym_strides(); + const auto& variable_strides = variable.sym_strides(); + for (const auto idx : c10::irange(grad_sizes.size())) { + if (grad_sizes[idx] != 1) { + if (grad_strides[idx] != variable_strides[idx]) { + return false; + } + } else { + // This should not be needed but we don't check if a Tensor has views + // before stashing it. And 0-strided Tensors of size 1 are actually + // views for ops like cat. + // TODO: Actually detect views in the accumulateGrad function so that + // this Tensor is not considered at all. 
+ if (grad_strides[idx] == 0) { + return false; + } + } + } + return true; + } else { + return grad.is_contiguous(at::MemoryFormat::Contiguous); + } +} + +// Creates a clone of new_grad that obeys the contract with variable. +// The clone should attach to new_grad's history if GradMode::is_enabled(). +inline at::Tensor clone_obey_contract( + const at::Tensor& new_grad, + const at::Tensor& variable) { + if (variable.is_non_overlapping_and_dense()) { + // (1) + // Does this dicey-looking sequence attach the result to new_grad's + // history if GradMode::is_enabled()? Yes, and @alband says it should. + return std::move(new_grad + .new_empty_strided_symint( + variable.sym_sizes(), + variable.sym_strides(), + variable.options().memory_format(c10::nullopt)) + .copy_(new_grad)); + } else { + // (2) + return new_grad.clone(at::MemoryFormat::Contiguous); + } +} + +} // namespace utils +} // namespace autograd +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h new file mode 100644 index 0000000000000000000000000000000000000000..f22a22312159d4260868eaef0b1ffd0a0f15ccab --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/lambda_post_hook.h @@ -0,0 +1,34 @@ +#pragma once + +#include + +namespace torch { +namespace autograd { +namespace utils { + +// Turns lambda into a torch::autograd::FunctionPostHook. +class LambdaPostHook : public torch::autograd::FunctionPostHook { + using variable_list = std::vector; + + public: + // The lambda function takes as arguments the outputs and inputs of the + // autograd function and can modify the outputs of the autograd function by + // returning a new output if needed. 
+ /* implicit */ LambdaPostHook( + std::function + fn) + : fn_(std::move(fn)) {} + + variable_list operator()( + const variable_list& outputs, + const variable_list& inputs) override { + return fn_(outputs, inputs); + } + + protected: + std::function fn_; +}; + +} // namespace utils +} // namespace autograd +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h new file mode 100644 index 0000000000000000000000000000000000000000..7701e97fe9189cb49a2c98cfa5cc1f5e41190941 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/python_arg_parsing.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace autograd { +namespace utils { + +// The parameter allow_copy is to accept copy for Tensor.to (and by proxy +// PackedSequences.to) but not nn.Module.to. 
+inline std::tuple< + c10::optional, + c10::optional, + bool, + bool, + c10::optional> +parse_to_conversion(PythonArgs& r, bool allow_copy) { + if (r.idx == 0) { + if (!allow_copy && !r.isNone(3)) + throw std::runtime_error(".to() does not accept copy argument"); + return std::make_tuple( + r.deviceOptional(0), + r.scalartypeOptional(1), + r.toBool(2), + r.toBool(3), + r.memoryformatOptional(4)); + } else if (r.idx == 1) { + if (!allow_copy && !r.isNone(2)) + throw std::runtime_error(".to() does not accept copy argument"); + return std::make_tuple( + c10::nullopt, + r.scalartype(0), + r.toBool(1), + r.toBool(2), + r.memoryformatOptional(3)); + } else { + auto tensor = r.tensor(0); + if (!allow_copy && !r.isNone(2)) + throw std::runtime_error(".to() does not accept copy argument"); + return std::make_tuple( + tensor.device(), + tensor.scalar_type(), + r.toBool(1), + r.toBool(2), + r.memoryformatOptional(3)); + } +} +} // namespace utils +} // namespace autograd +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/warnings.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/warnings.h new file mode 100644 index 0000000000000000000000000000000000000000..92e3c3611eadd18abc0ce10dbb481f61d83d2c80 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/utils/warnings.h @@ -0,0 +1,28 @@ +#pragma once +#include + +#include +#include + +namespace torch { +namespace autograd { +namespace utils { + +// Warning handler for multi-threaded contexts. Gather warnings from +// all threads into a single queue, then process together at the end +// in the main thread. 
+class DelayWarningHandler : public at::WarningHandler { + public: + ~DelayWarningHandler() override = default; + void replay_warnings(); + + private: + void process(const c10::Warning& warning) override; + + std::vector warnings_; + std::mutex mutex_; +}; + +} // namespace utils +} // namespace autograd +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h new file mode 100644 index 0000000000000000000000000000000000000000..3f3864077e2106650ca4ea126740f92d4ba3f824 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h @@ -0,0 +1,167 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Singleton class per worker which is responsible for storing the distributed +// autograd context for each autograd pass and also cleans up data for an +// autograd pass once its done. +// +// Each autograd pass is assigned a unique autograd_context_id and all data for +// that pass (DistAutogradContext) is stored in this container indexed by the +// autograd_context_id. The autograd_context_id itself is a 64 bit globally +// unique id. The first 16 bits is the worker_id and the next 48 bits is an +// auto-incrementing id for each worker. +// +// This container is also responsible for maintaining a globally unique message +// id, which is used to associate send/recv autograd function pairs. The format +// is similar to the autograd_context_id where we have a 64 bit integer with +// first 16 bits being the worker id and next 48 bits are auto-incrementing. +class TORCH_API DistAutogradContainer { + public: + explicit DistAutogradContainer(uint32_t num_shards); + + // One time initialization of the container. 
+ static DistAutogradContainer& init(int64_t worker_id); + + // Retrieve the singleton instance of the container, ensures we have + // initialized the container. + static DistAutogradContainer& getInstance(); + + // Create a new context for a distributed autograd pass. + const ContextPtr newContext(); + + // Clean up resources for a given context_id once the autograd pass is done. + // Sends RPC to other workers this worker knows about, telling them to clean + // up their context as well. Throws an exception if the context_id does not + // exist. + void releaseContext(int64_t context_id); + + // Releases an autograd context if it is present on this node. Also sends RPC + // to other workers this worker knows about, telling them to clean up their + // context. Does nothing if it is not present. + void releaseContextIfPresent(int64_t context_id); + + // Checks if the passed in context_id is valid. + void isValidContext(int64_t context_id); + + // Retrieve the autograd context for a given context_id. + ContextPtr retrieveContext(int64_t context_id); + + // Retrieves the currently active autograd context for the current thread. + ContextPtr currentContext(); + + // Checks whether or not the current thread has a valid autograd context. + bool hasValidContext() const; + + // Generate a new autograd_message_id for send/recv autograd functions. + int64_t newAutogradMessageId(); + + // Creates a new autograd context with the provided context_id. If a context + // already exists with the provided context_id, we just return it. + // This does not set the current context for the current thread. + ContextPtr getOrCreateContext(int64_t context_id); + + // Retrieves the maximum possible autograd_context_id/autograd_message_id that + // can be generated by this worker. 
+ int64_t getMaxId(); + + // Retrieves the worker ID for this node + rpc::worker_id_t getWorkerId() const; + + // Can set current context id if there is no valid context yet + static void setCurrentContextId(int64_t contextId); + + // Forcibly sets the thread local current context id. Should only be used in + // cases where you know what you're doing and need to override the thread + // local. Otherwise, use setCurrentContextId instead. + static void forceCurrentContextId(int64_t contextId); + + // Clear current context id + void clearCurrentContext(); + + // Returns the number of autograd contexts in the container. + size_t numAutogradContexts() const; + + // Returns the current thread local context id for this thread. + static int64_t currentContextId(); + + DistAutogradContainer(const DistAutogradContainer&) = delete; + DistAutogradContainer& operator=(const DistAutogradContainer&) = delete; + DistAutogradContainer(DistAutogradContainer&&) = delete; + DistAutogradContainer& operator=(DistAutogradContainer&&) = delete; + + private: + // Number of shards for the map storing autograd contexts. We'd like this + // to be a power of 2 and we don't expect a value much higher than the + // number of cores would provide much benefit. + static constexpr uint32_t kNumDefaultShards = 128; + + // Use cache line size for alignment. + static constexpr int kCacheLineSize = 64; + + // Structure holding one shard of the sharded autograd context map with its + // associated lock. Align to cache line size to avoid contention between + // adjacent entries. + struct alignas(kCacheLineSize) ContextsShard { + // Lock for this shard. + mutable std::mutex lock; + + // Map storing autograd contexts for this shard. + std::unordered_map contexts; + }; + + DistAutogradContainer() = delete; + ~DistAutogradContainer() = default; + + static DistAutogradContainer& getInstanceInternal(); + + // Retrieve the shard for given context_id. 
+ ContextsShard& getShard(int64_t context_id); + + // Sends an RPC to the workers that have a context corresponding to passed in + // context_id. This function should be called with the lock. + void sendReleaseContextRpc( + const std::unordered_set& workerIds, + int64_t context_id); + + // Erase context_id from the autograd context map, and reset the thread local + // current context id if it corresponds to the passed in context id. This + // function should be called with the lock. + void eraseContextIdAndReset(ContextsShard& shard, int64_t context_id); + + // Compute the number of shards for the autograd_contexts_ map. + static uint32_t computeNumShards(); + + // Auto incrementing context id used to identify unique autograd passes. + // Initialized with the first 16 bits being the worker_id. + std::atomic next_context_id_; + + // Unique id to identify a worker in the distributed setting. + int16_t worker_id_; + + // Whether or not the container has been initialized appropriately. + bool initialized_; + + // Sharded autograd context map. + std::vector autograd_contexts_; + + // Number of shards for the sharded autograd_contexts_ map. + uint32_t num_shards_; + + // Autograd message id to identify unique send/recv autograd function pairs. + std::atomic next_autograd_message_id_; + + // Maximum allowed value for autograd_context_id or autograd_message_id. 
+ int64_t max_id_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..ff576ace174fdfae29abafdec3678532e03cc29d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/functions/sendrpc_backward.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// As part of our distributed autograd implementation, whenever we send an RPC +// from one node to another, we add a 'SendRpcBackward' autograd function to the +// autograd graph. This is more or less a placeholder function that is used to +// kickoff the autograd engine on the current worker on the backward pass. The +// edges for this autograd function are the inputs to the RPC method. +// +// During the backward pass, this function is queued for execution in the +// autograd engine which eventually runs the rest of the autograd graph. +struct TORCH_API SendRpcBackward : public torch::autograd::Node { + public: + torch::autograd::variable_list apply( + torch::autograd::variable_list&& inputs) override; + + // SendRpcBackward is actually the root of an autograd graph on the local + // node. As a result, it doesn't receive any 'inputs', but rather the RPC + // framework passes gradients over to this function to kickoff local autograd + // computation. + void setGrads(const torch::autograd::variable_list& grads); + + // Retrieve the grads for the function. 
+ const torch::autograd::variable_list& getGrads() const; + + private: + torch::autograd::variable_list grads_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpython_defs.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpython_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..d9d3efa24a28d73cbc7fbc24a49738f22c492ffd --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpython_defs.h @@ -0,0 +1,21 @@ +#pragma once + +#include + +// Functions that need to be copied from the CPython source +// should go in cpython_defs.c. Copying is required when, e.g., +// we need to call internal CPython functions that are not exposed. + +#if IS_PYTHON_3_11_PLUS + +#include + +int THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame* frame); + +PyFunctionObject* _PyFunction_CopyWithNewCode( + PyFunctionObject* o, + PyCodeObject* code); + +void THP_PyFrame_Clear(_PyInterpreterFrame* frame); + +#endif diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/eval_frame.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/eval_frame.h new file mode 100644 index 0000000000000000000000000000000000000000..58d3d9fccab11ef464003c83f618eb436e5ddd3c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/eval_frame.h @@ -0,0 +1,7 @@ +#pragma once +#include + +extern "C" { +PyObject* torch_c_dynamo_eval_frame_init(void); +extern bool is_dynamo_compiling; +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/guards.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/guards.h new file mode 100644 index 0000000000000000000000000000000000000000..78727403351b54279b375fd66b5a54ea210c691a --- /dev/null +++ 
b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/guards.h @@ -0,0 +1,4 @@ +#pragma once +#include + +PyObject* torch_c_dynamo_guards_init(); diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/init.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/init.h new file mode 100644 index 0000000000000000000000000000000000000000..c496df083dfece0000c6a5258f7f2606b8c7a852 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/init.h @@ -0,0 +1,14 @@ +#pragma once + +// C2039 MSVC +#include +#include +#include + +#include + +namespace torch { +namespace dynamo { +void initDynamoBindings(PyObject* torch); +} +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/python_compiled_autograd.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/python_compiled_autograd.h new file mode 100644 index 0000000000000000000000000000000000000000..9422cd0c0383186484f7e4ae9906ba2aa137dbba --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/python_compiled_autograd.h @@ -0,0 +1,7 @@ +#pragma once +#include + +// see [Note: Compiled Autograd] +namespace torch::dynamo::autograd { +PyObject* torch_c_dynamo_compiled_autograd_init(); +} // namespace torch::dynamo::autograd diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/device_utils.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/device_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..08a5db3a9e9c6d6da6035fd36e0c0b40e206337d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/device_utils.h @@ -0,0 +1,51 @@ +#pragma once + +// WARNING: Be careful when adding new includes here. 
This header will be used +// in model.so, and should not refer to any aten/c10 headers except the stable +// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. The same rule +// applies to other files under torch/csrc/inductor/aoti_runtime/. + +#ifdef USE_CUDA + +// FIXME: Currently, CPU and CUDA backend are mutually exclusive. +// This is a temporary workaround. We need a better way to support +// multi devices. + +#include +#include + +#define AOTI_RUNTIME_DEVICE_CHECK(EXPR) \ + do { \ + const cudaError_t code = EXPR; \ + const char* msg = cudaGetErrorString(code); \ + if (code != cudaSuccess) { \ + throw std::runtime_error( \ + std::string("CUDA error: ") + std::string(msg)); \ + } \ + } while (0) + +namespace torch { +namespace aot_inductor { + +using DeviceStreamType = cudaStream_t; + +} // namespace aot_inductor +} // namespace torch + +#else // !USE_CUDA + +#define AOTI_RUNTIME_DEVICE_CHECK(EXPR) \ + bool ok = EXPR; \ + if (!ok) { \ + throw std::runtime_error("CPU runtime error"); \ + } + +namespace torch { +namespace aot_inductor { + +using DeviceStreamType = void*; + +} // namespace aot_inductor +} // namespace torch + +#endif // USE_CUDA diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model.h new file mode 100644 index 0000000000000000000000000000000000000000..2860a8b251e6d6b264e152e97357fcde358c1bdc --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_runtime/model.h @@ -0,0 +1,525 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +// WARNING: Be careful when adding new includes here. This header will be used +// in model.so, and should not refer to any aten/c10 headers except the stable +// C ABI defined in torch/csrc/inductor/aoti_torch/c/shim.h. 
The same rule +// applies to other files under torch/csrc/inductor/aoti_runtime/. +#include +#include + +#define AOTI_RUNTIME_CHECK(EXPR, MSG) \ + do { \ + bool ok = EXPR; \ + if (!ok) { \ + throw std::runtime_error(MSG); \ + } \ + } while (0) + +#if defined(__GNUC__) || defined(__clang__) +#define AOTI_NOINLINE __attribute__((noinline)) +#elif _MSC_VER +#define AOTI_NOINLINE __declspec(noinline) +#else +#define AOTI_NOINLINE +#endif + +// At codegen time, we write out a binary file called constants.bin. +// We then turn the raw binary to an object file that exposes this +// symbol and link it into the final .so. +// For information on the binary format, see `man objcopy`, under +// the "binary-architecture" flag: +// https://man7.org/linux/man-pages/man1/objcopy.1.html +// todo: use #embed in C++ 23 once available +extern const uint8_t _binary_constants_bin_start[]; +extern const uint8_t _binary_constants_bin_end[]; + +#define AOTI_CONST_GPU_ALIGNMENT 64 + +namespace { + +#ifdef USE_CUDA + +using CUDAPtr = std::unique_ptr>; + +CUDAPtr RAII_cudaMalloc(size_t num_bytes) { + void* data_ptr; + AOTI_RUNTIME_DEVICE_CHECK(cudaMalloc((void**)&data_ptr, num_bytes)); + auto deleter = [](void* ptr) { AOTI_RUNTIME_DEVICE_CHECK(cudaFree(ptr)); }; + return CUDAPtr(data_ptr, deleter); +} + +#endif // USE_CUDA + +} // anonymous namespace + +AOTI_NOINLINE static void throw_exception( + const char* call, + const char* file, + int64_t line) { + std::stringstream ss; + ss << call << " API call failed at " << file << ", line " << line; + throw std::runtime_error(ss.str()); +} + +#define AOTI_TORCH_ERROR_CODE_CHECK(call) \ + if ((call) != AOTI_TORCH_SUCCESS) { \ + throw_exception(#call, __FILE__, __LINE__); \ + } + +using DeleterFnPtr = void (*)(void*); + +namespace torch { +namespace aot_inductor { + +inline void delete_tensor_object(void* ptr) { + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_delete_tensor_object(reinterpret_cast(ptr))); +} + +// RAIIAtenTensorHandle steals the tensor 
objects created by the libtorch C ABI +class RAIIAtenTensorHandle { + public: + RAIIAtenTensorHandle() = delete; + RAIIAtenTensorHandle(const RAIIAtenTensorHandle& other) = delete; + RAIIAtenTensorHandle& operator=(const RAIIAtenTensorHandle& other) = delete; + + // Steal the ownership from another RAIIAtenTensorHandle using std::move + RAIIAtenTensorHandle(RAIIAtenTensorHandle&& other) = default; + RAIIAtenTensorHandle& operator=(RAIIAtenTensorHandle&& other) = default; + + // Steal the ownership from raw AtenTensorHandle + RAIIAtenTensorHandle(AtenTensorHandle handle) + : handle_(handle, delete_tensor_object) {} + + ~RAIIAtenTensorHandle() { + handle_.reset(); + } + + // Return a raw AtenTensorHandle to be used by aoti_torch functions + // Note: this function does NOT transfer the ownership of the handle + operator AtenTensorHandle() const { + return handle_.get(); + } + + AtenTensorHandle release() { + return handle_.release(); + } + + AtenTensorHandle get() { + return handle_.get(); + } + + void reset() { + handle_.reset(); + } + + int64_t size(int64_t d) { + int64_t size; + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_size(handle_.get(), d, &size)); + return size; + } + + int64_t stride(int64_t d) { + int64_t stride; + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_get_stride(handle_.get(), d, &stride)); + return stride; + } + + int64_t storage_offset() { + int64_t storage_offset; + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_get_storage_offset(handle_.get(), &storage_offset)); + return storage_offset; + } + + private: + std::unique_ptr handle_; +}; + +using ConstantMap = std::unordered_map; + +// Steal the ownership from raw AtenTensorHandle to RAIIAtenTensorHandle +inline std::vector steal_from_raw_handles_to_raii_handles( + AtenTensorHandle* handles, + size_t size) { + std::vector result; + result.reserve(size); + for (size_t i = 0; i < size; i++) { + result.emplace_back(handles[i]); + handles[i] = nullptr; + } + return result; +} + +// Defines the base class for 
AOTInductorModel, which is generated by the +// AOTInductor cpp codegen. Since we do not need dynamic dispatch, we rely +// on curiously recurring template pattern (CRTP) to save some runtime +// v-table overhead. The generated AOTInductorModel is specialized with +// methods such as run_impl. +template +class AOTInductorModelBase { + public: + AOTInductorModelBase( + size_t num_inputs, + size_t num_outputs, + size_t num_constants, + std::optional cubin_dir) + : inputs_info_(num_inputs), + outputs_info_(num_outputs), + constants_info_(num_constants), + cubin_dir_(cubin_dir), + device_idx_(-1) { +#ifdef USE_CUDA + AOTI_RUNTIME_DEVICE_CHECK(cudaGetDevice(&device_idx_)); +#endif // USE_CUDA + } + + ~AOTInductorModelBase() { +#ifdef USE_CUDA + if (run_finished_) { + auto code = cudaEventDestroy(*run_finished_); + if (code != cudaSuccess) { + std::cerr << "Failed to destroy CUDA event in AOTInductor model: " + << cudaGetErrorString(code) << std::endl; + } + } +#endif // USE_CUDA + } + + AOTInductorModelBase(AOTInductorModelBase&&) = delete; + AOTInductorModelBase& operator=(AOTInductorModelBase&&) = delete; + AOTInductorModelBase(const AOTInductorModelBase&) = delete; + AOTInductorModelBase& operator=(const AOTInductorModelBase&) = delete; + + void run( + AtenTensorHandle* + input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor) { +#ifdef USE_CUDA + if (!run_finished_) { + cudaEvent_t run_finished; + AOTI_RUNTIME_DEVICE_CHECK(cudaEventCreate(&run_finished)); + run_finished_.emplace(run_finished); + } + + auto* model = static_cast(this); + model->run_impl(input_handles, output_handles, stream, proxy_executor); + AOTI_RUNTIME_DEVICE_CHECK(cudaEventRecord(*run_finished_, stream)); +#else // 
!USE_CUDA + run_finished_ = false; + auto* model = static_cast(this); + model->run_impl(input_handles, output_handles, stream, proxy_executor); + run_finished_ = true; +#endif // USE_CUDA + } + + void load_constants(bool is_cpu) { + size_t num_constants = this->num_constants(); + constants_map_->reserve(num_constants); + + std::vector constants_internal_offset(num_constants); + if (!is_cpu) { + make_cuda_constant_blob(constants_internal_offset); + } + + size_t bytes_read = 0; + for (size_t i = 0; i < num_constants; i++) { + std::string name = this->constant_name(i); + size_t data_size = this->constant_data_size(i); + uint8_t* internal_ptr = (data_size != 0) + ? constant_ptr(constants_internal_offset[i], bytes_read, data_size) + : nullptr; + bytes_read += data_size; + + // Create at::Tensor from copied memory. + auto dtype = this->constant_type(i); + auto ndim = this->constant_ndim(i); + auto size = this->constant_shape(i); + auto stride = this->constant_stride(i); + auto offset = this->constant_offset(i); + + auto device_type = aoti_torch_device_type_cuda(); + if (is_cpu) { + device_type = aoti_torch_device_type_cpu(); + } + + AtenTensorHandle tensor_handle; + int device_idx = -1; // should be the same as was used for constant_blob_ +#ifdef USE_CUDA + AOTI_RUNTIME_DEVICE_CHECK(cudaGetDevice(&device_idx)); +#endif // USE_CUDA + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_create_tensor_from_blob( + internal_ptr, + ndim, + size, + stride, + offset, + dtype, + device_type, + device_idx, + &tensor_handle)); + constants_map_->emplace(std::move(name), tensor_handle); + } + this->update_constants_map(constants_map_); + } + +#ifdef USE_CUDA + CUDAPtr&& release_constant_blob() { + return std::move(constant_blob_); + } +#endif + + uint8_t* constant_ptr( + size_t constant_offset, + size_t bytes_read, + size_t data_size) { +#ifdef USE_CUDA + auto* constants_ptr = static_cast(constant_blob_.get()); + uint8_t* internal_ptr = constants_ptr + constant_offset; + // Copy data to GPU 
memory + // TODO: Handle shared storage case. + AOTI_RUNTIME_DEVICE_CHECK(cudaMemcpy( + internal_ptr, + _binary_constants_bin_start + bytes_read, + data_size, + cudaMemcpyHostToDevice)); + return internal_ptr; +#else // !USE_CUDA + // get pointer to constant which is packed in model during compile time. + return const_cast(_binary_constants_bin_start) + bytes_read; +#endif // USE_CUDA + } + + void make_cuda_constant_blob(std::vector& constants_internal_offset) { +#ifdef USE_CUDA + size_t num_constants = this->num_constants(); + // Compute required blob size with 64-alignment if on GPU. + size_t max_blob = 0; + for (size_t i = 0; i < num_constants; i++) { + size_t data_size = this->constant_data_size(i); + if (data_size % AOTI_CONST_GPU_ALIGNMENT) { + data_size = AOTI_CONST_GPU_ALIGNMENT + + (data_size / AOTI_CONST_GPU_ALIGNMENT) * AOTI_CONST_GPU_ALIGNMENT; + } + constants_internal_offset[i] = max_blob; + max_blob += data_size; + } + constant_blob_ = RAII_cudaMalloc(max_blob); +#endif // USE_CUDA + } + + size_t num_inputs() const { + return inputs_info_.size(); + } + + size_t num_outputs() const { + return outputs_info_.size(); + } + + size_t num_constants() const { + return constants_info_.size(); + } + + const char* input_name(int64_t idx) const { + return inputs_info_.at(idx).name; + } + + const char* output_name(int64_t idx) const { + return outputs_info_.at(idx).name; + } + + const char* constant_name(int64_t idx) const { + return constants_info_.at(idx).name; + } + + size_t constant_ndim(int64_t idx) { + return constants_info_.at(idx).shape.size(); + } + + const int64_t* constant_shape(int64_t idx) const { + return constants_info_.at(idx).shape.data(); + } + + const int64_t* constant_stride(int64_t idx) const { + return constants_info_.at(idx).stride.data(); + } + + int32_t constant_type(int64_t idx) const { + return constants_info_.at(idx).dtype; + } + + size_t constant_offset(int64_t idx) const { + return constants_info_.at(idx).offset; + } + + size_t 
constant_data_size(int64_t idx) const { + return constants_info_.at(idx).data_size; + } + + const char* get_in_spec() const { + return in_spec_.c_str(); + } + + const char* get_out_spec() const { + return out_spec_.c_str(); + } + + void update_constants_map(std::shared_ptr constants_map) { + constants_map_ = std::move(constants_map); + if (!constants_map_) { + return; + } + constants_.resize(constants_info_.size()); + int idx = 0; + for (const auto& info : constants_info_) { + const auto it = constants_map_->find(info.name); + if (it != constants_map_->end()) { + constants_[idx] = it->second; + } + idx++; + } + } + + /// Returns true if the model is complete. + bool is_finished() { +#ifdef USE_CUDA + if (!run_finished_) { + throw std::runtime_error{"Model CUDA event was not initialized"}; + } + + auto event_status = cudaEventQuery(*run_finished_); + if (event_status == cudaSuccess) { + return true; + } else if (event_status == cudaErrorNotReady) { + return false; + } + + throw std::runtime_error( + std::string("The model did not finish successfully. Error: ") + + cudaGetErrorString(cudaGetLastError())); +#else // !USE_CUDA + return run_finished_; +#endif // USE_CUDA + } + + /// Synchronizes completion event. + void wait_for_completion() { +#ifdef USE_CUDA + if (!run_finished_) { + throw std::runtime_error{"Model event was not initialized"}; + } + + AOTI_RUNTIME_DEVICE_CHECK(cudaEventSynchronize(*run_finished_)); +#endif // USE_CUDA + } + + protected: + struct ParamInfo { + const char* name = nullptr; + }; + + struct ConstInfo { + const char* name = nullptr; + std::vector shape; + std::vector stride; + int32_t dtype; + int64_t offset; + size_t data_size; + }; + + std::vector inputs_info_; + std::vector outputs_info_; + std::vector constants_info_; + std::string in_spec_; + std::string out_spec_; + + std::shared_ptr constants_map_; + std::vector constants_; + +#ifdef USE_CUDA + // Holds the blob storage for constants' at::Tensor for CUDA. 
+ CUDAPtr constant_blob_; +#endif // USE_CUDA + + // A directory with CUDA binary files, e.g. compiled kernels, etc. + const std::optional cubin_dir_; + + // Record if the model finishes an inference run so that its owning + // AOTModelContainer can re-use this instance. +#ifdef USE_CUDA + std::optional run_finished_; +#else // !USE_CUDA + bool run_finished_; +#endif + + // Generated model uses this device index to create CUDA guards. + int device_idx_; +}; + +// Codegen-ed classes can derive from this to keep pointers to loaded kernels. +class AOTInductorModelKernelsBase { + public: + virtual ~AOTInductorModelKernelsBase() = default; +}; + +class AOTInductorModel : public AOTInductorModelBase { + public: + AOTInductorModel(std::shared_ptr, std::optional); + + void run_impl( + AtenTensorHandle* + input_handles, // array of input AtenTensorHandle; handles + // are stolen; the array itself is borrowed + AtenTensorHandle* + output_handles, // array for writing output AtenTensorHandle; handles + // will be stolen by the caller; the array itself is + // borrowed + DeviceStreamType stream, + AOTIProxyExecutorHandle proxy_executor); + + static std::unique_ptr Create( + std::shared_ptr constants, + std::optional cubin_dir) { + return std::make_unique(std::move(constants), cubin_dir); + } + + private: + std::unique_ptr kernels_; +}; + +#ifdef USE_CUDA +class AOTICudaStreamGuard { + public: + AOTICudaStreamGuard(cudaStream_t stream, int32_t device_index) { + CUDAStreamGuardHandle ptr; + AOTI_TORCH_ERROR_CODE_CHECK( + aoti_torch_create_cuda_stream_guard(stream, device_index, &ptr)); + guard_ = + std::unique_ptr>(ptr, [](void* ptr) { + AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_delete_cuda_stream_guard( + reinterpret_cast(ptr))); + }); + } + + private: + std::unique_ptr> guard_; +}; +#endif // USE_CUDA + +} // namespace aot_inductor +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/tensor_converter.h 
b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/tensor_converter.h new file mode 100644 index 0000000000000000000000000000000000000000..6b4318212869524ebad3a6f29c5a9cfc2687a83c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/inductor/aoti_torch/tensor_converter.h @@ -0,0 +1,35 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace aot_inductor { + +// Functions declared here are not meant to be called from the AOTInductor +// generated model.so + +// No ownership transfer, just pointer type conversion +TORCH_API at::Tensor* tensor_handle_to_tensor_pointer(AtenTensorHandle handle); + +// No ownership transfer, just pointer type conversion +TORCH_API AtenTensorHandle tensor_pointer_to_tensor_handle(at::Tensor* tensor); + +// unsafe_alloc_new_handles_from_tensors is used for allocating new aten +// tensor objects and return them as a vector of AtenTensorHandle (raw +// pointers), and those pointers will be stolen by model.so. +TORCH_API std::vector unsafe_alloc_new_handles_from_tensors( + std::vector& tensors); + +// alloc_tensors_by_stealing_from_handles is used for creating a vector of aten +// tensors by stealing from an array of handles. Only the handles are stolen, +// and the array itself is borrowed. 
+// +// WARNING: Can NOT be called in model.so unless in the non-ABI-compatible mode +TORCH_API std::vector alloc_tensors_by_stealing_from_handles( + AtenTensorHandle* handles, + size_t length); + +} // namespace aot_inductor +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h new file mode 100644 index 0000000000000000000000000000000000000000..496ecbdbbc6c530e50e0d3a8e3b815e9a5f005e0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_data.h @@ -0,0 +1,61 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace lazy { + +class TORCH_API BackendData { + public: + struct Info { + /** + * Used by Lazy Graph Executor to tag info on BackendData objs + * */ + virtual ~Info() = default; + }; + /** + * Represents (Tensor) data stored on a backend device + * in its native format. 
+ * */ + using Handle = int64_t; + + BackendData(BackendDevice device, Shape shape) + : device_(std::move(device)), shape_(std::move(shape)) {} + + virtual ~BackendData() = default; + + const BackendDevice& device() const { + return device_; + } + + const Shape& shape() const { + return shape_; + } + + Info* info() const { + return info_.get(); + } + + std::shared_ptr SetInfo(std::shared_ptr info) { + std::swap(info, info_); + return info; + } + + virtual Handle GetHandle() = 0; + + virtual void Assign(const BackendData& data) = 0; + + virtual bool HasValue() const = 0; + + private: + BackendDevice device_; + Shape shape_; + std::shared_ptr info_; +}; + +using BackendDataPtr = std::shared_ptr; + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h new file mode 100644 index 0000000000000000000000000000000000000000..4c239d1e4b71c0049e28e05a3e67a35f68516232 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_device.h @@ -0,0 +1,100 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +namespace c10 { +struct Device; +} + +namespace torch { +namespace lazy { + +// Backend should extend it and define their own supported hardware types. 
+struct TORCH_API BackendDeviceType { + int8_t type{(int8_t)at::kCPU}; + // Note: previous default value was '0', which actually maps to at::kCPU, at + // least now it is explicit, we may want to make default/undefined semantics + // more clear though + BackendDeviceType() : type((int8_t)at::kCPU) {} + BackendDeviceType(int8_t type) : type(type) {} + + virtual ~BackendDeviceType() = default; + virtual std::string toString() const { + return "Unknown"; + } +}; + +class TORCH_API BackendDevice { + public: + // The default constructor will set both the device type and ordinal + // to backend specific defaults. + BackendDevice(); + BackendDevice(std::shared_ptr&& type, int64_t ordinal); + + int8_t type() const; + int64_t ordinal() const { + return ordinal_; + } + + bool operator==(const BackendDevice& other) const { + return compare(other) == 0; + } + bool operator!=(const BackendDevice& other) const { + return compare(other) != 0; + } + bool operator<(const BackendDevice& rhs) const { + return compare(rhs) < 0; + } + + std::string toString() const; + + private: + int compare(const BackendDevice& rhs) const; + + // Use shared_ptr instead of unique_ptr so that BackendDevice can be copied. + std::shared_ptr type_; + int64_t ordinal_; +}; + +TORCH_API std::ostream& operator<<( + std::ostream& os, + const BackendDevice& device); + +// Helpers for converting a c10::Device to BackendDevice and vice versa. +TORCH_API BackendDevice atenDeviceToBackendDevice(const c10::Device& device); +TORCH_API c10::Device backendDeviceToAtenDevice(const BackendDevice& device); + +// Tries to extract the backend device out of the lazy tensor. Returns nullopt +// if the input is not a lazy tensor. 
+TORCH_API c10::optional GetBackendDevice( + const at::ITensorListRef tensors); +TORCH_API c10::optional GetBackendDevice( + const at::TensorList tensors); +TORCH_API c10::optional GetBackendDevice( + const at::Tensor& tensor); +TORCH_API c10::optional GetBackendDevice( + const c10::optional& device); + +// For variadic template. +TORCH_API c10::optional GetBackendDevice(); + +template +c10::optional GetBackendDevice( + const T& tensor, + const Args&... forward_tensors) { + auto optional_device = GetBackendDevice(tensor); + if (optional_device) { + return optional_device; + } + return GetBackendDevice(forward_tensors...); +} + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..f94d3b602e52c889c96162143f8d6eb8b8d0237b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/backend/backend_interface.h @@ -0,0 +1,158 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +struct IrBuilder; + +/** + * Work in progress- don't treat this as a stable interface yet! + */ +class TORCH_API BackendImplInterface { + public: + virtual ~BackendImplInterface() = default; + + /** + * Initialization/Teardown + * */ + // No-op by default. Allows custom functionality to be exposed through + // extension bindings. 
+ virtual void InitializeAtenBindings() const {} + + virtual void PrepareToExit() const = 0; + + /** + * Configuration + * */ + + virtual void SetRngSeed(size_t seed) const = 0; + + /** + * IR Tracing + * */ + + virtual const IrBuilder* GetIrBuilder() const = 0; + + /** + * Data Transfer + * */ + + virtual BackendDataPtr MakeComputationDataFromTensor( + const at::Tensor& tensor, + const Shape& shape, + const BackendDevice& device) const = 0; + virtual BackendDataPtr MakeComputationDataFromScalar( + const at::Scalar& scalar, + const torch::lazy::BackendDevice& device) const = 0; + virtual BackendDataPtr CreateDataPlaceholder( + const BackendDevice& device, + const Shape& shape) const = 0; + + // Gets backend data if the node is a device data node. Otherwise returns + // nullptr + virtual BackendDataPtr GetComputationDataFromNode(const Node*) const = 0; + + virtual at::Tensor MakeTensorFromComputationData( + const BackendDataPtr data, + c10::optional logical_scalar_type) const = 0; + + /** + * Lowering, Compilation, Execution + * */ + + virtual std::unique_ptr CreateLoweringContext( + const std::string& name, + BackendDevice device, + c10::ArrayRef post_order, + Util::EmissionMap emit_status) const = 0; + + virtual std::unique_ptr CreateLoweringContext( + const std::string& name, + BackendDevice device) const = 0; + + // TODO(whc) need to keep this? + virtual std::vector GetCompilationDevices( + const std::string& device, + c10::ArrayRef devices) const = 0; + + virtual std::vector Compile( + std::vector instances) const = 0; + + virtual std::vector ExecuteComputation( + torch::lazy::ComputationPtr computation, + c10::ArrayRef arguments, + const BackendDevice& device) const = 0; + + /** + * Device Configuration + * */ + + // Set or get the default device type. + // For backends used with virtual c10::Devices, this configures what real + // device type the backend should use, and matters if the backend supports + // more than one type of real device. 
+ virtual std::shared_ptr GetDefaultDeviceType() const = 0; + virtual void SetDefaultDeviceType(int8_t type) = 0; + + // Set or get the default device ordinal. + // For backends that supports multi-device, this configures what the + // default device the backend should use. + virtual int64_t GetDefaultDeviceOrdinal() const = 0; + virtual void SetDefaultDeviceOrdinal(int64_t) = 0; + + // Specify which aten device should be used for eager fallback + // may change depending on current 'Default' DeviceType + virtual at::DeviceType EagerFallbackDeviceType() const = 0; + + // Query all available backend devices + virtual std::vector GetBackendDevices() const = 0; + + virtual std::string CreateMetricReport() const { + return ""; + } + + // Map a particular c10:: device to a concrete backend device + // Note:: c10:: devices may be virtual or concrete. xla:: and lazy:: are + // virtual devices, meaning they may map to a gpu, tpu, etc. behind the + // scenes. In the future, non-virtual c10:: devices may also use lazy tensors + // through a mode, in which case these APIs should still work, but should be + // identity mappings. 
+ virtual BackendDevice GetBackendDevice(c10::Device device) const = 0; + + // TODO(whc) + // Additional APIs expected for supporting distributed training, to be + // designed + + /** + * Debug/Metrics + * */ + + // virtual std::map GetMetrics() const = 0; + + // virtual MemoryInfo GetMemoryInfo(const std::string& device) = 0; + + virtual std::string GetComputationBackendText( + const ComputationPtr computation) const = 0; +}; + +class TORCH_API BackendRegistrar { + public: + BackendRegistrar(const BackendImplInterface* backend_impl_interface); +}; + +TORCH_API bool hasBackend(); +TORCH_API const BackendImplInterface* getBackend(); + +TORCH_API const IrBuilder* getIrBuilder(); + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h new file mode 100644 index 0000000000000000000000000000000000000000..1ee635b667f90a1d56a186650c3bb023ae542660 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/cache.h @@ -0,0 +1,144 @@ +/** + * Cache utils in this file is adapted from PyTorch/XLA + * https://github.com/pytorch/xla/blob/master/third_party/xla_client/cache.h + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +// Generic key and object cache with LRU expiration policy. The objects of type +// T will be stored as std::shared_ptr and taken and returned as such, by the +// cache API. +template < + typename K, + typename T, + typename H = std::hash, + typename E = std::equal_to> +class Cache { + public: + using TypePtr = std::shared_ptr; + using Element = std::pair; + + explicit Cache(size_t max_size) : max_size_(max_size) {} + + // Adds an object to the cache, unless it already exists. 
If the cache grows + // beyond the limit set during construction, the oldest used object will be + // removed from the cache. + TypePtr Add(K key, TypePtr object) { + if (!max_size_) { + return object; + } + std::lock_guard slock(lock_); + element_list_.emplace_front(Element(std::move(key), std::move(object))); + auto it = element_list_.begin(); + auto emplace_result = element_map_.emplace(&it->first, it); + if (!emplace_result.second) { + element_list_.erase(it); + DoLRU(emplace_result.first->second); + } else if (element_list_.size() > max_size_) { + Element* last = &element_list_.back(); + element_map_.erase(&last->first); + element_list_.pop_back(); + } + return emplace_result.first->second->second; + } + + // Retrieves the existing object if it exists. If it does, its position in + // the LRU list gets moved to the head of the list. + // Returns nullptr if no object with the specified key is found within the + // cache. + TypePtr Get(const K& key) { + if (!max_size_) { + return nullptr; + } + std::lock_guard slock(lock_); + auto it = element_map_.find(&key); + if (it == element_map_.end()) { + return nullptr; + } + DoLRU(it->second); + return it->second->second; + } + + TypePtr GetLatest() { + std::lock_guard g(lock_); + TORCH_CHECK(!element_list_.empty()); + return element_list_.front().second; + } + + bool Erase(const K& key) { + if (!max_size_) { + return false; + } + std::lock_guard slock(lock_); + auto it = element_map_.find(&key); + if (it == element_map_.end()) { + return false; + } + auto lit = it->second; + element_map_.erase(it); + element_list_.erase(lit); + return true; + } + + void Clear() { + if (!max_size_) { + return; + } + std::lock_guard slock(lock_); + element_map_.clear(); + element_list_.clear(); + } + + int Numel() const { + if (!max_size_) { + return 0; + } + std::lock_guard g(lock_); + TORCH_CHECK(element_map_.size() == element_list_.size()); + return element_map_.size(); + } + + private: + using ElementList = std::list; + + struct 
Hasher { + size_t operator()(const K* key) const { + return hasher(*key); + } + + H hasher; + }; + + struct Equaler { + bool operator()(const K* k1, const K* k2) const { + return equaler(*k1, *k2); + } + + E equaler; + }; + + using ElementMap = std:: + unordered_map; + + void DoLRU(typename ElementList::iterator it) { + element_list_.splice(element_list_.begin(), element_list_, it); + } + + mutable std::mutex lock_; + const size_t max_size_ = 0; + ElementList element_list_; + ElementMap element_map_; +}; + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/config.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/config.h new file mode 100644 index 0000000000000000000000000000000000000000..16d2b80701536166c817ad609db14f342e6c94d0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/config.h @@ -0,0 +1,28 @@ +#pragma once +#include +#include + +C10_DECLARE_bool(torch_lazy_ir_debug); +C10_DECLARE_bool(torch_lazy_handle_special_scalars); +C10_DECLARE_bool(torch_lazy_all_numbers_special_scalars); +C10_DECLARE_bool(torch_lazy_param_aliasing); +C10_DECLARE_bool(torch_lazy_reuse_ir); +C10_DECLARE_bool(torch_lazy_use_thread_pool); +C10_DECLARE_bool(torch_lazy_enable_device_data_cache); + +C10_DECLARE_int(torch_lazy_compilation_cache_size); +C10_DECLARE_int(torch_lazy_device_data_cache_size); +C10_DECLARE_int(torch_lazy_io_thread_pool_size); +C10_DECLARE_int(torch_lazy_metrics_samples); +C10_DECLARE_int(torch_lazy_trim_graph_check_frequency); +C10_DECLARE_int(torch_lazy_trim_graph_size); + +C10_DECLARE_string(torch_lazy_metrics_percentiles); + +C10_DECLARE_int(torch_lazy_shape_cache_size); + +namespace torch { +namespace lazy { +TORCH_API std::string& getLTCForceFallback(); +} +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h 
b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h new file mode 100644 index 0000000000000000000000000000000000000000..4b4e1e0749b24f619e2d97f41fba31b24d8fd31f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_dump_util.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +#include + +namespace torch { +namespace lazy { + +class BackendDevice; + +class TORCH_API DumpUtil { + public: + static std::string ToDot(c10::ArrayRef nodes); + + static std::string PostOrderToDot( + c10::ArrayRef post_order, + c10::ArrayRef roots); + + static std::string ToText(c10::ArrayRef nodes); + + static std::string PostOrderToText( + c10::ArrayRef post_order, + c10::ArrayRef roots); + + static std::string ToBackend( + c10::ArrayRef values, + const BackendDevice& device); +}; + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_metadata.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_metadata.h new file mode 100644 index 0000000000000000000000000000000000000000..ea413fcfb8263ec2ac80ca090b24e0e35f1529ef --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ir_metadata.h @@ -0,0 +1,49 @@ +#pragma once + +#include + +#include +#include + +namespace torch { +namespace lazy { +struct SourceLocation { + std::string file; + std::string function; + int line = -1; +}; + +TORCH_API void EmitShortFrameInfo( + std::ostream& stream, + const std::vector& frames); + +TORCH_API std::ostream& operator<<( + std::ostream& stream, + const std::vector& frames); + +// The base class for user defined metadata which is possible to attach to IR +// nodes. 
+struct TORCH_API UserMetaData { + virtual ~UserMetaData() = default; +}; + +struct TORCH_API MetaData { + std::string scope; + std::vector frame_info; +}; + +// TODO(whc) is this going to be used outside of in IR decompositions? +// RAII data structure to be used a stack variable to enter a new IR scope. IR +// scope names will appear in the IR and will help identifying the source of the +// single IR nodes. +struct TORCH_API ScopePusher { + explicit ScopePusher(const std::string& name); + ~ScopePusher(); + + static void ResetScopes(); +}; + +TORCH_API MetaData GetMetaDataIfDebugging(); + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/lazy_graph_executor.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/lazy_graph_executor.h new file mode 100644 index 0000000000000000000000000000000000000000..d2edbb75ffba338bcc79e33e529fdd3005b3f1dc --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/lazy_graph_executor.h @@ -0,0 +1,426 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +class TORCH_API LazyGraphExecutor { + public: + struct DeviceDataInfo : public BackendData::Info { + DeviceDataInfo(int64_t tensor_id, bool read_only) + : tensor_id(tensor_id), read_only(read_only) {} + + int64_t tensor_id = 0; + bool read_only = false; + }; + + // Register a lazy graph executor instance that can be retrieved using Get() + static void Register(LazyGraphExecutor*); + static LazyGraphExecutor* Get(); + + virtual ~LazyGraphExecutor() = default; + + // Override these methods to perform custom tensor registration and + // unregistration Note: It is vital that the parent implementations are also + // called in order for the tensors to show up in the live tensor list + virtual void RegisterTensor(std::shared_ptr data); + virtual void 
UnregisterTensor(LazyTensor::Data* data); + + // Seed for random generator. + // Override to supply your own DeviceContextArena. + virtual Value GetRngSeed(const BackendDevice& device); + virtual uint64_t GetRunningSeed(const BackendDevice& device); + virtual void SetRngSeed(const BackendDevice& device, uint64_t seed); + + void DeviceBarrier(const BackendDevice& device); + + BackendDataPtr GetDeviceData( + const at::Tensor& tensor, + const BackendDevice& device); + + BackendDataPtr GetDeviceData( + const at::Scalar& value, + at::ScalarType scalar_type, + const BackendDevice& device); + + // Retrieves the set of lazy tensors which are currently live in the system, + // for the given device. If device is nullptr, the live tensors for all + // devices will be returned. Returned tensors are sorted by device as primary + // key, and by unique ID as secondary key. + std::vector GetLiveTensors(const BackendDevice* device); + + // Makes sure that any outstanding IR operation accumulated over live tensors, + // gets turned into device data. If wait is true, the sync operation will be + // run synchronously. The devices argument, if not empty, tells the devices + // which should be partecipating into the replicated computation. + virtual void SyncLiveTensorsGraph( + const BackendDevice* device, + c10::ArrayRef devices, + bool wait); + + // Applies all the pending IR operations queued over the input tensors. All + // the tensors must be on the same device. If wait is true, the sync operation + // will be run synchronously. The devices argument, if not empty, tells the + // devices which should be partecipating into the replicated computation. + void SyncTensorsGraph( + std::vector* tensors, + c10::ArrayRef devices, + bool wait, + bool sync_ltc_data); + + // Marks an execution step, which allows the tensor framework to understand + // the computation boundaries. + // Override to supply your own DeviceContextArena. 
+ virtual void MarkStep(const BackendDevice& device); + + // Waits for all the outstanding operations on all the supplied devices. + // If devices is empty, the wait will happen for all local devices. + void WaitDeviceOps(c10::ArrayRef devices); + + // Retrieves the PyTorch CPU tensors behind the lazy tensors IR operations. + // All the tensors must be on the same device. + std::vector GetTensors(std::vector* tensors); + + size_t IncTrimCounter() const; + + // Dumps the backend specific text of the computation accumulated in the graph + // which is attached the tensors. + std::string DumpBackendComputation(const std::vector& tensors); + + Value GetDeviceDataIrValue( + const at::Scalar& value, + c10::ScalarType type, + const BackendDevice& device); + Value GetIrValueForScalar( + const at::Scalar& value, + c10::ScalarType type, + const BackendDevice& device); + Value GetIrValueForScalar( + const at::Scalar& value, + const BackendDevice& device); + + // TODO: even though this API is currently used **only** in codegen to + // generate real scalar IR values vs scalar tensors, we would like to + // use it in other cases where `GetIrValueForXXXScalar` is used, as well + // In order to do that, we need to untangle the cases where we don't need + // `expand` and where we don't expect a scalar tensor + Value GetIrValueForScalarFromCodegen( + const at::Scalar& value, + const BackendDevice& device); + Value GetIrValueForExpandedScalar( + const at::Scalar& value, + const Shape& shape, + const BackendDevice& device); + + struct CachedComputation { + explicit CachedComputation(ComputationPtr computation) + : computation(std::move(computation)) {} + + ComputationPtr computation; + }; + + using ComputationCache = Cache; + + ComputationCache* GetComputationCache(); + + hash_t GetGraphHash(const std::vector& tensors); + + protected: + // TODO(alanwaketan): Revisit if all of them need to be accessible to + // derived classes. 
+ + struct SyncTensorsConfig { + // Whether we want to force data on the target tensors (hence trimming + // the IR graph above them). + bool force_ltc_data = true; + // Whether when setting the data, the other properties of the tensor + // state should be reset. + bool sync_ltc_data = true; + }; + + struct SyncTensorCollection { + SyncTensorCollection() : hash(0) {} + + SyncTensorsConfig config; + std::vector indices; + hash_t hash; + std::vector unlocker; + BackendDevice device; + }; + + struct PostOrderData { + std::vector post_order; + Util::EmissionMap emission_map; + std::vector parameters_data; + std::vector parameter_sequence; + }; + + // Locking: + // We perform two kinds of operations of tensors, synchronous and + // asynchronous. The ApplyPendingGraph() are synchronous, as we need the + // device data result immediately. Before the synchronous operations can + // start, they need to wait that the pending asynchronous operations have + // completed. Synchronous operations do not hold device locks, since they are + // strictly sequential, dictated by the PyTorch execution order. The + // SyncTensorsGraph() is asynchronous, and returns immediately after having + // scheduled the asynchronous operation. While executing, the asynchronous + // operations will hold locks on all the participating devices (in most common + // cases there will be only one device). + // Since asynchronous operations capture device locks, only one asynchronous + // operation can execute at the same time, on a given device. Tensor + // operations which send data to device do not need to hold any device locks + // while doing so. Only operations which _use_ device data (computations, and + // transfer from server) need to wait for asynchronous operations to complete + // (barrier). 
+ + class DeviceLocker { + public: + explicit DeviceLocker(BackendDevice device) : device_(std::move(device)) {} + + const BackendDevice& device() const { + return device_; + } + + void Lock(); + void Unlock(std::exception_ptr exptr); + void Barrier(); + + private: + void CheckResetException(); + + BackendDevice device_; + std::mutex mutex_; + std::condition_variable cv_; + bool locked_ = false; + std::exception_ptr exptr_; + }; + + class DeviceLockerArena { + public: + static DeviceLockerArena* Get(); + + std::shared_ptr GetLocker(const BackendDevice& device); + + void DeviceBarrier(const BackendDevice& device); + + // Use a set to impose an order on the device locking sequence (ABBA + // prevention). + std::vector LockDevices( + const std::set& devices); + + private: + ExceptionCleanup LockDevice(const BackendDevice& device); + + std::mutex mutex_; + std::map> lockers_; + }; + + class DataCacheArena { + public: + static DataCacheArena* Get(); + + BackendDataPtr GetDeviceData( + const at::Tensor& tensor, + const BackendDevice& device); + + BackendDataPtr GetDeviceData( + const at::Scalar& value, + at::ScalarType scalar_type, + const BackendDevice& device); + + private: + struct TensorHasher { + size_t operator()(const at::Tensor& tensor) const; + }; + struct TensorComparer { + bool operator()(const at::Tensor& tensor1, const at::Tensor& tensor2) + const; + }; + + explicit DataCacheArena(size_t max_cache_size); + + using DataCache = + Cache; + + DataCache* GetDataCache(const BackendDevice& device); + + size_t max_cache_size_ = 0; + std::mutex mutex_; + std::map> device_caches_; + }; + + // The DeviceContextArena holds per device live information and statistics, + // among which the lazy tensors which are currently alive in the system. This + // is used to create computation "barriers" in order to flush pending + // operations and ensure the same computations are created during the training + // loops. 
+ // TODO(alanwaketan): Add a registry such that we don't need to make all + // related methods virtual. + class DeviceContextArena { + protected: + struct DeviceContext { + std::mutex lock; + std::map> tensors_data; + uint64_t seed = 101; + uint64_t running_seed = 101; + Value seed_ir_value; + }; + + public: + static DeviceContextArena* Get(); + virtual ~DeviceContextArena() = default; + + void RegisterTensor(std::shared_ptr data); + void UnregisterTensor(LazyTensor::Data* data); + + std::vector GetLiveTensors(const BackendDevice* device); + + // Overriding it allow derived class to use their own IRs for Value. + virtual Value GetRngSeed(const BackendDevice& device); + uint64_t GetRunningSeed(const BackendDevice& device); + void SetRngSeed(const BackendDevice& device, uint64_t seed); + + void MarkStep(const BackendDevice& device); + + std::vector GetActiveDevices(); + + protected: + DeviceContext* GetDeviceContext(const BackendDevice& device); + + void ForAllDeviceContexts( + const std::function& fn, + const BackendDevice* device); + + // Overriding it allow derived class to use their own conversions. + virtual Value IrValueFromScalar( + const at::Scalar& value, + at::ScalarType scalar_type, + const BackendDevice& device); + + private: + std::vector GetAllDeviceContexts(); + + std::mutex lock_; + std::map device_contexts_; + }; + + struct Async { + Async( + SyncTensorCollection* coll, + std::vector parameters_data, + std::vector tensors_data, + ComputationCache::TypePtr cached_computation); + virtual ~Async() = default; + + void Wait(); + + MultiWait mwait; + std::vector indices; + std::vector unlocker; + std::vector parameters_data; + BackendDevice device; + ComputationCache::TypePtr cached_computation; + std::vector tensors_data; + }; + + void ResetTrimCounter() const; + + // Waits for this SyncTensorCollection's device barrier and acquire the lock. 
+ virtual void TensorCollectionBarrier(SyncTensorCollection* coll); + + // One can override to insert your own profiler. + virtual PostOrderData RunPostOrder( + const std::vector& ir_values, + SyncTensorCollection* coll); + + private: + struct CompilationResult { + BackendDevice device; + size_t emitted_nodes = 0; + ComputationPtr computation; + std::vector parameters_data; + }; + + virtual bool ShouldSyncTensor(const LazyTensorPtr& tensor) const; + + SyncTensorCollection CollectSyncTensors( + const std::vector& tensors, + const SyncTensorsConfig& config); + + std::vector CollectRoots( + const std::vector& tensors, + c10::ArrayRef indices); + + std::vector SetTensorData( + std::vector* tensors, + const SyncTensorsConfig& config, + c10::ArrayRef indices, + const std::vector& tensor_data_vec); + + void ExtractIRAndPrepareTensorData( + std::vector* tensors, + const SyncTensorsConfig& config, + c10::ArrayRef indices, + std::vector& ir_values, + std::vector& tensor_data_vec); + + std::shared_ptr TryRunCachedSync( + std::vector* tensors, + SyncTensorCollection* coll, + PostOrderData* po_data, + const std::vector& tensor_data_vec); + + CompilationResult Compile( + const std::vector& tensors, + c10::ArrayRef devices, + const SyncTensorCollection& coll, + PostOrderData* po_data, + const std::vector& ir_values); + + ComputationCache::TypePtr LookupCachedCompile(const hash_t& hash); + + std::shared_ptr SyncTensorsGraphInternal( + std::vector* tensors, + c10::ArrayRef devices, + const SyncTensorsConfig& config); + + // Schedules the execution of a sync tensors operation in background. The + // asynchronous operation will hold the device locks by capturing the ones + // present within the coll structure. 
+ std::shared_ptr ScheduleSyncTensorsGraph( + SyncTensorCollection* coll, + std::vector parameters_data, + std::vector tensors_data, + ComputationCache::TypePtr cached_computation); + + std::shared_ptr ScheduleSyncTensorsGraph( + std::vector* tensors, + SyncTensorCollection* coll, + std::vector parameters_data, + ComputationCache::TypePtr cached_computation, + const std::vector& tensor_data_vec); + + std::vector GetTensorsFused(std::vector* tensors); + + std::vector FetchTensors( + std::vector* tensors, + c10::ArrayRef tensors_data, + const std::vector* indices); + + // Gathers the device data for all the input tensors, after an + // asynchronous operation. + std::vector GatherTensorsData( + const std::vector& tensors, + c10::ArrayRef indices, + c10::ArrayRef tensors_data); +}; + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/arithmetic_ir_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/arithmetic_ir_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3abb6cb3b10858a13163957c992bba80fe9c2e27 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/ops/arithmetic_ir_ops.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace torch { +namespace lazy { + +TORCH_API NodePtr operator+(const Value& node1, const Value& node2); +TORCH_API NodePtr operator-(const Value& node1, const Value& node2); +TORCH_API NodePtr operator*(const Value& node1, const Value& node2); +TORCH_API NodePtr operator/(const Value& node1, const Value& node2); + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..3a15c91c03452d84d9b6b90a27d71b5129dd2de7 --- /dev/null 
+++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor.h @@ -0,0 +1,259 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +class TORCH_API SymNodeImpl : public c10::SymNodeImpl { + public: + SymNodeImpl(NodePtr ptr) : node_(std::move(ptr)){}; + NodePtr node_; +}; + +class LazyTensor; +using LazyTensorPtr = c10::intrusive_ptr; + +class TORCH_API LazyTensor : public c10::intrusive_ptr_target { + public: + // This is the core lazy tensor data structure where all the tensor data is + // held. The lazy tensor is nothing more than a shared pointer to a Data + // object. + struct Data { + Data(BackendDataPtr handle, BackendDevice device) + : handle(std::move(handle)), + device(std::move(device)), + unique_id(GetNextTensorId()) {} + Data(Value ir_value, BackendDevice device) + : ir_value(std::move(ir_value)), + device(std::move(device)), + unique_id(GetNextTensorId()) {} + Data(at::Tensor tensor_data, BackendDevice device) + : tensor_data(std::move(tensor_data)), + device(std::move(device)), + unique_id(GetNextTensorId()) {} + // TODO(alanwaketan): Remove this ctor. This is a + // temporary ctor to ease XLA LTC migration. It depends on + // XLA's Functionalization integration. + Data(BackendDevice device) + : device(std::move(device)), unique_id(GetNextTensorId()) {} + + virtual ~Data(); + + BackendDataPtr handle; + Value ir_value; + c10::optional tensor_data; + const BackendDevice device; + const int64_t unique_id = 0; + size_t generation = 1; + }; + + static LazyTensorPtr Create( + const at::Tensor& tensor, + const BackendDevice& device); + static LazyTensorPtr Create(Value ir_value, const BackendDevice& device); + static LazyTensorPtr Create(BackendDataPtr handle); + static LazyTensorPtr Create(std::shared_ptr data); + + // The default ctor previously created a null LazyTensor (one with no 'data' + // obj). 
Creating a null LazyTensor is no longer possible, since the same can + // be achieved by creating a null LazyTensorPtr and it is way too confusing to + // have to check both lazy_tensor_ptr && *lazy_tensor_ptr, so everywhere that + // used to rely on a LazyTensor obj with a null Data can now rely on a null + // LazyTensorPtr instead. + LazyTensor() = delete; + LazyTensor(const LazyTensor&) = default; + LazyTensor(LazyTensor&&) noexcept = default; + + ~LazyTensor() override = default; + + size_t generation() const { + return data()->generation; + } + + // Override it to use your own Shape. + virtual int64_t size(int64_t dim) const; + + // Override it to use your own graph executor. + virtual at::Tensor ToTensor(bool detached); + + void ShallowCopyTo(LazyTensorPtr dest) const; + + // Assigns the tensor value to the lazy tensor. + void SetTensor(at::Tensor tensor); + + void UpdateFromTensor(at::Tensor tensor, bool sync); + void UpdateFromTensorOut(at::Tensor tensor); + void UpdateFromTensorOut(const LazyTensorPtr& tensor); + + const std::shared_ptr& data() const; + + // Override it to use your own type conversion. + virtual at::ScalarType dtype() const; + + MaybeRef shape() const; + + const BackendDevice& GetDevice() const; + int64_t GetUniqueId() const; + + // Fetches the data behind the tensor. If the tensor has a graph defining + // its current value, executes the graph and fetches the data result. + BackendDataPtr GetDataHandle(); + + // Fetches the current value of the data, which can be missing (nullptr) + // in case the tensor has a graph defining its current value, + BackendDataPtr CurrentDataHandle() const; + + void SetDataHandle(BackendDataPtr handle); + void SetDataHandle(BackendDataPtr handle, bool sync); + + // Retrieves the current IR Node, or nullptr in case no active IR Node is + // available. + Value CurrentIrValue() const; + + // Retrieves the IR Node representing this LazyTensor. One will be created if + // missing. 
Note that although this is a const API, it actually changes the + // internal state ofthe object. + Value GetIrValue() const; + + void SetIrValue(Value ir_value); + void SetInPlaceIrValue(Value ir_value); + + c10::optional CurrentTensorData() const; + + std::vector MakeOutputTensors(NodePtr node) const; + + LazyTensorPtr CopyTensorToDevice(const BackendDevice& device); + + // Applies the queue of operations in preparation for using the data. + // Override it to use your own graph executor. + virtual void ApplyPendingGraph(); + + // Override it to set extra information. + virtual void AssignIrValue(Value ir_value) const; + + protected: + explicit LazyTensor(std::shared_ptr data); + + void SetTensorData(at::Tensor tensor_data); + + // We build a graph accumulating operations, but at a given point we + // need to force a rendering, otherwise the graph can grow without control. + // Think: + // for i in range(0, 100000): + // a = a + b + void TryLimitGraphSize(); + + // Override it to instantiate your own data. + virtual Value GetIrValueForTensor( + const at::Tensor& tensor, + const BackendDevice& device) const; + + Value CreateTensorNode(BackendDataPtr data, bool read_only) const; + + private: + LazyTensor(const at::Tensor& tensor, const BackendDevice& device); + LazyTensor(Value ir_value, const BackendDevice& device); + explicit LazyTensor(BackendDataPtr handle); + + static int64_t GetNextTensorId(); + + std::shared_ptr data_; +}; + +// Utils to convert at::Tensor to LazyTensor, and vice versa. + +// Section 0: c10::Tensorlist ==> lazy::TensorList +// note: GetTensorList is not totally parallel to GetLtcTensor; A TensorList +// skips +// the LazyTensor wrappers, assuming that the list of underlying IR nodes +// is actually more useful for downstream computations. TBD. +TORCH_API torch::lazy::Value GetTensorList(at::ITensorListRef tensors); + +// Section 1: at::Tensor => LazyTensor. +// Extracts the LazyTensor out of an at::Tensor. 
Returns a null LazyTensor +// if the tensor is not a lazy tensor. +TORCH_API LazyTensorPtr TryGetLtcTensor(const at::Tensor& tensor); + +// Extracts the LazyTensor out of an at::Tensor. Throws an exception +// if the tensor is not a lazy tensor. +TORCH_API LazyTensorPtr GetLtcTensor(const at::Tensor& tensor); + +// Same as above, applied to a list of tensors. +TORCH_API std::vector GetLtcTensors( + c10::ArrayRef tensors); + +// If tensor is a lazy tensor type, returns the LazyTensor embedded within it, +// otherwise creates a new lazy tensor type with tensor as data. +TORCH_API LazyTensorPtr GetOrCreateLtcTensor( + const c10::optional& tensor, + const BackendDevice& device); + +TORCH_API LazyTensorPtr GetLtcTensorOrCreateForWrappedNumber( + const at::Tensor& tensor, + const BackendDevice& device); + +// Section 2: LazyTensor => at::Tensor. +// Creates an ATen tensor from an LazyTensor. +TORCH_API at::Tensor CreateAtenFromLtcTensor(const LazyTensorPtr& ltc_tensor); +TORCH_API at::Tensor CreateAtenFromLtcTensor(LazyTensor&& ltc_tensor); + +// Note [Lazy Tensor Functionalization] +// The functionalization pass is implemented by wrapping all TensorImpl +// objects in C++ with an extra FunctionalTensorWrapper object, +// that knows how to perform functionalization +// +// Certain functions in the aten API serve as entry/exit points for +// functionalization, where we need to perform the wrapping/unwrapping: +// - aten::to.device +// - aten::empty + +// Given a non-lazy tensor, this function creates a lazy tensor on the specified +// (lazy) device. The functionalize_output determines whether or not we should +// wrap the output in a "functional wrapper". +// +// How do you know whether to pass true/false for functionalize_output? 
+// +// Case 1: nonlazy -> lazy +// If you're implementing a function that takes in nonlazy tensors and returns +// lazy tensors, then you should think of that function as an "entrypoint" to +// functionalization, and use functionalize_output=true Examples include: +// - factory functions (the LTC kernel for at::empty) +// - CPU -> Lazy device converions (the LTC kernel for at::to_device) +// +// Case 2: lazy -> lazy +// If you're implementing a function that takes in lazy tensors and returns +// lazy tensors, +// **but** requires creating lazy tensors internally, +// then you can assume that the current function is running inside of some +// outer context where functionalization is already running, that will take +// care of doing the wrapping for you, and use functionalize_output=true +// Examples include: +// - CPU fallback (takes in lazy tensors, converts to cpu, calls kernel, +// converts returns back to lazy tensors). +TORCH_API at::Tensor to_lazy_tensor( + const at::Tensor& self, + const c10::TensorOptions& options, + at::Device device, + bool non_blocking, + bool functionalize_output); + +template +auto TupleAtenFromLtcTensorsImpl( + const std::vector& tensors, + std::index_sequence) { + return std::make_tuple(CreateAtenFromLtcTensor(tensors[Indices])...); +} + +template +auto TupleAtenFromLtcTensors(const std::vector& tensors) { + return TupleAtenFromLtcTensorsImpl(tensors, std::make_index_sequence{}); +} + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_util.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e4e6a1b7f0c26dc8e1e235b0487666fb0b3dfd9e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/tensor_util.h @@ -0,0 +1,78 @@ +#pragma once + +#include +#include + +#include + +#include +#include + +namespace 
torch { +namespace lazy { + +TORCH_API std::vector ComputeArrayStrides( + c10::ArrayRef sizes); + +TORCH_API std::vector DataHandlesToTensors( + c10::ArrayRef data_handles, + at::ScalarType dest_element_type); + +// Uploads an ATEN tensor data to the device and fetches the corresponding +// device data handle. +TORCH_API BackendDataPtr +TensorToDataHandle(const at::Tensor& tensor, const BackendDevice& device); + +// Retrieves the device data handles by parallel uploading data onto the +// corresponding devices. +TORCH_API std::vector CreateTensorsData( + const std::vector& tensors, + const std::vector& devices); + +// Makes a deep copy of an ATEN tensor. +inline at::Tensor CopyTensor(const at::Tensor& ref) { + return ref.to(ref.options(), /*non_blocking=*/false, /*copy=*/true); +} + +// Same as above, with an additional cast. +inline at::Tensor CopyTensor( + const at::Tensor& ref, + at::ScalarType dest_type, + bool copy = true) { + return ref.to(ref.options().dtype(dest_type), /*non_blocking=*/false, copy); +} + +template +T OptionalOr(const c10::optional& value, T defval) { + return value ? static_cast(*value) : defval; +} + +// Unwraps tensor to target dtype if it's a wrapped number. +inline at::Tensor UnwrapNumber(const at::Tensor& tensor, at::ScalarType dtype) { + return tensor.unsafeGetTensorImpl()->is_wrapped_number() ? tensor.to(dtype) + : tensor; +} + +template +at::Scalar MakeIntScalar(T value) { + return at::Scalar(static_cast(value)); +} + +// Routing values to device data maximizes the changes for compilation cache +// hits, but it can prevent the compiler to perform optimizations. So tensor +// values which are within a given set, are routed to constant scalars if this +// API returns true. +TORCH_API bool IsSpecialScalar(const at::Scalar& value); + +// Note: returns a reference instead of a fresh tensor to avoid refcount bumps. 
+inline const at::Tensor& maybe_unwrap_functional(const at::Tensor& tensor) { + if (at::functionalization::impl::isFunctionalTensor(tensor)) { + return at::functionalization::impl::unsafeGetFunctionalWrapper(tensor) + ->value(); + } else { + return tensor; + } +} + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/thread_pool.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/thread_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..571a55b468fdd5a7114aa3c30221604ed0113795 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/thread_pool.h @@ -0,0 +1,37 @@ +/** + * This file is adapted from PyTorch/XLA + * https://github.com/pytorch/xla/blob/master/third_party/xla_client/metrics.h + */ + +#pragma once + +#include +#include +#include + +#include + +namespace torch { +namespace lazy { + +class TORCH_API Completion { + public: + class Data; + + explicit Completion(std::shared_ptr data); + + ~Completion(); + + void Wait(); + + private: + std::shared_ptr data_; +}; + +// Schedules a closure which might wait for IO or other events/conditions. 
+TORCH_API void ScheduleIoClosure(std::function closure); +TORCH_API Completion +ScheduleIoClosureWithCompletion(std::function closure); + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/util.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/util.h new file mode 100644 index 0000000000000000000000000000000000000000..a3d35783ae9691694dfe370bf1371b053fa6eb3c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/core/util.h @@ -0,0 +1,126 @@ +/** + * Most of the utils in this file is adapted from PyTorch/XLA + * https://github.com/pytorch/xla/blob/master/third_party/xla_client/util.h + */ + +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace lazy { + +// Similar to c10::scope_exit but with a status. +// TODO(alanwaketan): Consolidate it with c10::scope_exit. +template +class Cleanup { + public: + using StatusType = T; + + explicit Cleanup(std::function&& func) + : func_(std::move(func)) {} + Cleanup(Cleanup&& ref) noexcept + : func_(std::move(ref.func_)), status_(std::move(ref.status_)) {} + Cleanup(const Cleanup&) = delete; + + ~Cleanup() { + if (func_ != nullptr) { + func_(std::move(status_)); + } + } + + Cleanup& operator=(const Cleanup&) = delete; + + Cleanup& operator=(Cleanup&& ref) noexcept { + if (this != &ref) { + func_ = std::move(ref.func_); + status_ = std::move(ref.status_); + } + return *this; + } + + void Release() { + func_ = nullptr; + } + + void SetStatus(StatusType&& status) { + status_ = std::move(status); + } + + const StatusType& GetStatus() const { + return status_; + } + + private: + std::function func_; + StatusType status_; +}; + +using ExceptionCleanup = Cleanup; + +// Allows APIs which might return const references and values, to not be forced +// to return values in the signature. 
+// TODO(alanwaketan): This is clever, but is there really no std or c10 +// supports? Needs more investigations. +template +class MaybeRef { + public: + /* implicit */ MaybeRef(const T& ref) : ref_(ref) {} + /* implicit */ MaybeRef(T&& value) + : storage_(std::move(value)), ref_(*storage_) {} + + const T& Get() const { + return ref_; + } + const T& operator*() const { + return Get(); + } + operator const T&() const { + return Get(); + } + + bool IsStored() const { + return storage_.has_value(); + } + + private: + c10::optional storage_; + const T& ref_; +}; + +template +std::vector Iota(size_t size, T init = 0, T incr = 1) { + std::vector result(size); + T value = init; + for (size_t i = 0; i < size; ++i, value += incr) { + result[i] = value; + } + return result; +} + +template +std::vector ToVector(const S& input) { + return std::vector(input.begin(), input.end()); +} + +template +c10::optional> ToOptionalVector( + c10::OptionalArrayRef arrayRef) { + if (arrayRef) { + return arrayRef->vec(); + } + return c10::nullopt; +} + +template +typename std::underlying_type::type GetEnumValue(T value) { + return static_cast::type>(value); +} + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/python/python_util.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/python/python_util.h new file mode 100644 index 0000000000000000000000000000000000000000..8040a023de518476e65c04ff426f1807c19b14c4 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/python/python_util.h @@ -0,0 +1,15 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +c10::optional TORCH_PYTHON_API GetPythonFrameTop(); + +std::vector TORCH_PYTHON_API GetPythonFrames(); + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/config.h 
b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/config.h new file mode 100644 index 0000000000000000000000000000000000000000..ac0320b9d0ac3cca01ddfc12f01e4a232fdda0df --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/config.h @@ -0,0 +1,7 @@ +#pragma once +#include + +// TODO(whc) unclear if this is useful, has only been tested as true +C10_DECLARE_bool(torch_lazy_ts_tensor_update_sync); + +C10_DECLARE_bool(torch_lazy_ts_cuda); diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/tensor_aten_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/tensor_aten_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..bf663f4ca6b1b78634a15bd65617328e92584239 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/tensor_aten_ops.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace lazy { + +////////////////////////////////////////////////////////////////////////////// +// ATEN operators follows here, listed in alphabetical order. +////////////////////////////////////////////////////////////////////////////// + +void copy_(torch::lazy::LazyTensorPtr& input, torch::lazy::LazyTensorPtr& src); +// Fills the input with the given value. 
+void fill_(torch::lazy::LazyTensorPtr& input, const at::Scalar& value); + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_autograd_functions.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_autograd_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..7e01724470384497a5fcdc9102d4b06403bdc640 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_autograd_functions.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace torch { +namespace lazy { + +struct MaxPool3dAutogradFunctionTS + : public torch::autograd::Function { + static at::Tensor forward( + torch::autograd::AutogradContext* ctx, + at::Tensor self, + at::IntArrayRef kernel_size, + at::IntArrayRef stride, + at::IntArrayRef padding, + at::IntArrayRef dilation, + bool ceil_mode); + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + torch::autograd::variable_list grad_output); +}; + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node.h new file mode 100644 index 0000000000000000000000000000000000000000..62cc9016f6ffa2e50fb4d19501823eedcc0befde --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/lazy/ts_backend/ts_node.h @@ -0,0 +1,106 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace lazy { + +using TSOpVector = std::vector; + +class TORCH_API TsNode : public lazy::Node { + public: + TsNode( + OpKind op, + OpList operands, + std::vector&& shapes, + size_t num_outputs, + hash_t hash_seed = kHashSeed); + + TsNode( + OpKind op, + OpList operands, + const std::function& 
shape_fn, + size_t num_outputs, + hash_t hash_seed = kHashSeed); + + TsNode( + OpKind op, + OpList operands, + size_t num_outputs, + hash_t hash_seed = kHashSeed); + + TsNode( + OpKind op, + Shape shape, + size_t num_outputs, + hash_t hash_seed = kHashSeed); + + ~TsNode() override = default; + + hash_t hash() const override; + + hash_t shapeHash() const override; + + const std::string getPythonStacktrace() const; + + // Lower is a backend-specific method since it returns a backend specific + // type. hence, it is convenient to define it differently per-backend rather + // than at Node API + virtual TSOpVector Lower( + std::shared_ptr function, + TSLoweringContext* loctx) const; + + private: + // The hash of the dag WITH size info. Used for shape caching + hash_t shape_hash_; + // The hash of the dag used to look up the compiled graph by a hash + // in this case, we will use the dag hash WITHOUT size info if dynamic shape + // is enabled and use the dag hash WITH size info otherwise. + hash_t dag_hash_; +}; + +// Note: this OpKind is separate from ltc_ops.h since it would be a circular +// import otherwise, I like leaving TensorList in this file, and I think most of +// ltc_ops special cases will be deleted anyway +const OpKind tensor_list_opkind = OpKind::Get("lazy_tensors::tensor_list"); + +// TensorList represents an at::TensorList which is a vector[Tensor] but is also +// a first-class IValue and can be fed as a single input to a TS program. It is +// much easier to handle TensorLists in Lazy Tensor code if they are represented +// as a single Node so there can be more than one TensorList and more than one +// Tensor side-by-side as operands to an op. +// +// Note: shape is undefined for TensorList. We assert in some places that +// #shapes matches #outputs and this stems from +// the fact that currently all IR nodes represent tensors (there is no +// type system for this IR). Becuase of this, TensorList is a bit of a +// hack. 
+// +// TODO(whc) once Shape() API is moved to Node base, also make it virtual, and +// then implement it as NotImplemented for TensorList, also fixing the assertion +// that would fail. +struct TORCH_API TensorList : public TsNode { + static OpKind ClassOpKind() { + return tensor_list_opkind; + } + + TensorList() = delete; + TensorList(OpList values); + + bool CanBeReused(OpList values) const { + return operands() == std::vector(values.begin(), values.end()); + } + + TSOpVector Lower( + std::shared_ptr function, + TSLoweringContext* loctx) const override; +}; + +} // namespace lazy +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/init.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/init.h new file mode 100644 index 0000000000000000000000000000000000000000..923aca2097d32d1a6b770b60cd2c9a5b3786b8b8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/init.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch::onnx { + +void initONNXBindings(PyObject* module); + +} // namespace torch::onnx diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/onnx.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/onnx.h new file mode 100644 index 0000000000000000000000000000000000000000..df887844ff66564662ab4a179911180a73132c37 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/onnx/onnx.h @@ -0,0 +1,20 @@ +#pragma once + +namespace torch::onnx { + +enum class OperatorExportTypes { + ONNX, // Strict ONNX export + ONNX_ATEN, // ONNX With ATen op everywhere + ONNX_ATEN_FALLBACK, // ONNX export with ATen fallback + ONNX_FALLTHROUGH, // Export supported ONNX ops. Pass through unsupported ops. 
+}; + +enum class TrainingMode { + EVAL, // Inference mode + PRESERVE, // Preserve model state (eval/training) + TRAINING, // Training mode +}; + +constexpr char kOnnxNodeNameAttribute[] = "onnx_name"; + +} // namespace torch::onnx diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69629db6831ee64cd0ca3b3372b98b32b122d629 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/dpll.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/dpll.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5169b14db0f1d4b2813484c4ed7fa06835ff5101 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/dpll.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/dpll2.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/dpll2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f60505f1cbfd4ab79a821b3bc6cd7aaba364d28f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/dpll2.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/lra_theory.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/lra_theory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9286802bc7dc88d28558e989589f7c4cbbc2123c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/lra_theory.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/minisat22_wrapper.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/minisat22_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3672d57dc3483f12104410e6bedf9f5344337ee5 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/minisat22_wrapper.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/pycosat_wrapper.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/pycosat_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44f111956c7be9e8ba77f9672963dd25769c49e9 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/pycosat_wrapper.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/z3_wrapper.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/z3_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7041a8e93b8312be4e9af5f5150e75fb95130b43 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/__pycache__/z3_wrapper.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/dpll.py b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/dpll.py new file mode 100644 index 0000000000000000000000000000000000000000..40e6802f7626c982a9a6cd7146baea3ac6b8b6e0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/dpll.py @@ -0,0 +1,308 @@ +"""Implementation of DPLL algorithm + +Further improvements: eliminate calls to pl_true, implement branching rules, +efficient unit propagation. 
+ +References: + - https://en.wikipedia.org/wiki/DPLL_algorithm + - https://www.researchgate.net/publication/242384772_Implementations_of_the_DPLL_Algorithm +""" + +from sympy.core.sorting import default_sort_key +from sympy.logic.boolalg import Or, Not, conjuncts, disjuncts, to_cnf, \ + to_int_repr, _find_predicates +from sympy.assumptions.cnf import CNF +from sympy.logic.inference import pl_true, literal_symbol + + +def dpll_satisfiable(expr): + """ + Check satisfiability of a propositional sentence. + It returns a model rather than True when it succeeds + + >>> from sympy.abc import A, B + >>> from sympy.logic.algorithms.dpll import dpll_satisfiable + >>> dpll_satisfiable(A & ~B) + {A: True, B: False} + >>> dpll_satisfiable(A & ~A) + False + + """ + if not isinstance(expr, CNF): + clauses = conjuncts(to_cnf(expr)) + else: + clauses = expr.clauses + if False in clauses: + return False + symbols = sorted(_find_predicates(expr), key=default_sort_key) + symbols_int_repr = set(range(1, len(symbols) + 1)) + clauses_int_repr = to_int_repr(clauses, symbols) + result = dpll_int_repr(clauses_int_repr, symbols_int_repr, {}) + if not result: + return result + output = {} + for key in result: + output.update({symbols[key - 1]: result[key]}) + return output + + +def dpll(clauses, symbols, model): + """ + Compute satisfiability in a partial model. + Clauses is an array of conjuncts. 
+ + >>> from sympy.abc import A, B, D + >>> from sympy.logic.algorithms.dpll import dpll + >>> dpll([A, B, D], [A, B], {D: False}) + False + + """ + # compute DP kernel + P, value = find_unit_clause(clauses, model) + while P: + model.update({P: value}) + symbols.remove(P) + if not value: + P = ~P + clauses = unit_propagate(clauses, P) + P, value = find_unit_clause(clauses, model) + P, value = find_pure_symbol(symbols, clauses) + while P: + model.update({P: value}) + symbols.remove(P) + if not value: + P = ~P + clauses = unit_propagate(clauses, P) + P, value = find_pure_symbol(symbols, clauses) + # end DP kernel + unknown_clauses = [] + for c in clauses: + val = pl_true(c, model) + if val is False: + return False + if val is not True: + unknown_clauses.append(c) + if not unknown_clauses: + return model + if not clauses: + return model + P = symbols.pop() + model_copy = model.copy() + model.update({P: True}) + model_copy.update({P: False}) + symbols_copy = symbols[:] + return (dpll(unit_propagate(unknown_clauses, P), symbols, model) or + dpll(unit_propagate(unknown_clauses, Not(P)), symbols_copy, model_copy)) + + +def dpll_int_repr(clauses, symbols, model): + """ + Compute satisfiability in a partial model. 
+ Arguments are expected to be in integer representation + + >>> from sympy.logic.algorithms.dpll import dpll_int_repr + >>> dpll_int_repr([{1}, {2}, {3}], {1, 2}, {3: False}) + False + + """ + # compute DP kernel + P, value = find_unit_clause_int_repr(clauses, model) + while P: + model.update({P: value}) + symbols.remove(P) + if not value: + P = -P + clauses = unit_propagate_int_repr(clauses, P) + P, value = find_unit_clause_int_repr(clauses, model) + P, value = find_pure_symbol_int_repr(symbols, clauses) + while P: + model.update({P: value}) + symbols.remove(P) + if not value: + P = -P + clauses = unit_propagate_int_repr(clauses, P) + P, value = find_pure_symbol_int_repr(symbols, clauses) + # end DP kernel + unknown_clauses = [] + for c in clauses: + val = pl_true_int_repr(c, model) + if val is False: + return False + if val is not True: + unknown_clauses.append(c) + if not unknown_clauses: + return model + P = symbols.pop() + model_copy = model.copy() + model.update({P: True}) + model_copy.update({P: False}) + symbols_copy = symbols.copy() + return (dpll_int_repr(unit_propagate_int_repr(unknown_clauses, P), symbols, model) or + dpll_int_repr(unit_propagate_int_repr(unknown_clauses, -P), symbols_copy, model_copy)) + +### helper methods for DPLL + + +def pl_true_int_repr(clause, model={}): + """ + Lightweight version of pl_true. + Argument clause represents the set of args of an Or clause. This is used + inside dpll_int_repr, it is not meant to be used directly. 
+ + >>> from sympy.logic.algorithms.dpll import pl_true_int_repr + >>> pl_true_int_repr({1, 2}, {1: False}) + >>> pl_true_int_repr({1, 2}, {1: False, 2: False}) + False + + """ + result = False + for lit in clause: + if lit < 0: + p = model.get(-lit) + if p is not None: + p = not p + else: + p = model.get(lit) + if p is True: + return True + elif p is None: + result = None + return result + + +def unit_propagate(clauses, symbol): + """ + Returns an equivalent set of clauses + If a set of clauses contains the unit clause l, the other clauses are + simplified by the application of the two following rules: + + 1. every clause containing l is removed + 2. in every clause that contains ~l this literal is deleted + + Arguments are expected to be in CNF. + + >>> from sympy.abc import A, B, D + >>> from sympy.logic.algorithms.dpll import unit_propagate + >>> unit_propagate([A | B, D | ~B, B], B) + [D, B] + + """ + output = [] + for c in clauses: + if c.func != Or: + output.append(c) + continue + for arg in c.args: + if arg == ~symbol: + output.append(Or(*[x for x in c.args if x != ~symbol])) + break + if arg == symbol: + break + else: + output.append(c) + return output + + +def unit_propagate_int_repr(clauses, s): + """ + Same as unit_propagate, but arguments are expected to be in integer + representation + + >>> from sympy.logic.algorithms.dpll import unit_propagate_int_repr + >>> unit_propagate_int_repr([{1, 2}, {3, -2}, {2}], 2) + [{3}] + + """ + negated = {-s} + return [clause - negated for clause in clauses if s not in clause] + + +def find_pure_symbol(symbols, unknown_clauses): + """ + Find a symbol and its value if it appears only as a positive literal + (or only as a negative) in clauses. 
+ + >>> from sympy.abc import A, B, D + >>> from sympy.logic.algorithms.dpll import find_pure_symbol + >>> find_pure_symbol([A, B, D], [A|~B,~B|~D,D|A]) + (A, True) + + """ + for sym in symbols: + found_pos, found_neg = False, False + for c in unknown_clauses: + if not found_pos and sym in disjuncts(c): + found_pos = True + if not found_neg and Not(sym) in disjuncts(c): + found_neg = True + if found_pos != found_neg: + return sym, found_pos + return None, None + + +def find_pure_symbol_int_repr(symbols, unknown_clauses): + """ + Same as find_pure_symbol, but arguments are expected + to be in integer representation + + >>> from sympy.logic.algorithms.dpll import find_pure_symbol_int_repr + >>> find_pure_symbol_int_repr({1,2,3}, + ... [{1, -2}, {-2, -3}, {3, 1}]) + (1, True) + + """ + all_symbols = set().union(*unknown_clauses) + found_pos = all_symbols.intersection(symbols) + found_neg = all_symbols.intersection([-s for s in symbols]) + for p in found_pos: + if -p not in found_neg: + return p, True + for p in found_neg: + if -p not in found_pos: + return -p, False + return None, None + + +def find_unit_clause(clauses, model): + """ + A unit clause has only 1 variable that is not bound in the model. + + >>> from sympy.abc import A, B, D + >>> from sympy.logic.algorithms.dpll import find_unit_clause + >>> find_unit_clause([A | B | D, B | ~D, A | ~B], {A:True}) + (B, False) + + """ + for clause in clauses: + num_not_in_model = 0 + for literal in disjuncts(clause): + sym = literal_symbol(literal) + if sym not in model: + num_not_in_model += 1 + P, value = sym, not isinstance(literal, Not) + if num_not_in_model == 1: + return P, value + return None, None + + +def find_unit_clause_int_repr(clauses, model): + """ + Same as find_unit_clause, but arguments are expected to be in + integer representation. + + >>> from sympy.logic.algorithms.dpll import find_unit_clause_int_repr + >>> find_unit_clause_int_repr([{1, 2, 3}, + ... 
{2, -3}, {1, -2}], {1: True}) + (2, False) + + """ + bound = set(model) | {-sym for sym in model} + for clause in clauses: + unbound = clause - bound + if len(unbound) == 1: + p = unbound.pop() + if p < 0: + return -p, False + else: + return p, True + return None, None diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/dpll2.py b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/dpll2.py new file mode 100644 index 0000000000000000000000000000000000000000..75115e01efb221d765e81b195fcf20293c26abee --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/dpll2.py @@ -0,0 +1,684 @@ +"""Implementation of DPLL algorithm + +Features: + - Clause learning + - Watch literal scheme + - VSIDS heuristic + +References: + - https://en.wikipedia.org/wiki/DPLL_algorithm +""" + +from collections import defaultdict +from heapq import heappush, heappop + +from sympy.core.sorting import ordered +from sympy.assumptions.cnf import EncodedCNF + +from sympy.logic.algorithms.lra_theory import LRASolver + + +def dpll_satisfiable(expr, all_models=False, use_lra_theory=False): + """ + Check satisfiability of a propositional sentence. + It returns a model rather than True when it succeeds. + Returns a generator of all models if all_models is True. 
+ + Examples + ======== + + >>> from sympy.abc import A, B + >>> from sympy.logic.algorithms.dpll2 import dpll_satisfiable + >>> dpll_satisfiable(A & ~B) + {A: True, B: False} + >>> dpll_satisfiable(A & ~A) + False + + """ + if not isinstance(expr, EncodedCNF): + exprs = EncodedCNF() + exprs.add_prop(expr) + expr = exprs + + # Return UNSAT when False (encoded as 0) is present in the CNF + if {0} in expr.data: + if all_models: + return (f for f in [False]) + return False + + if use_lra_theory: + lra, immediate_conflicts = LRASolver.from_encoded_cnf(expr) + else: + lra = None + immediate_conflicts = [] + solver = SATSolver(expr.data + immediate_conflicts, expr.variables, set(), expr.symbols, lra_theory=lra) + models = solver._find_model() + + if all_models: + return _all_models(models) + + try: + return next(models) + except StopIteration: + return False + + # Uncomment to confirm the solution is valid (hitting set for the clauses) + #else: + #for cls in clauses_int_repr: + #assert solver.var_settings.intersection(cls) + + +def _all_models(models): + satisfiable = False + try: + while True: + yield next(models) + satisfiable = True + except StopIteration: + if not satisfiable: + yield False + + +class SATSolver: + """ + Class for representing a SAT solver capable of + finding a model to a boolean theory in conjunctive + normal form. 
+ """ + + def __init__(self, clauses, variables, var_settings, symbols=None, + heuristic='vsids', clause_learning='none', INTERVAL=500, + lra_theory = None): + + self.var_settings = var_settings + self.heuristic = heuristic + self.is_unsatisfied = False + self._unit_prop_queue = [] + self.update_functions = [] + self.INTERVAL = INTERVAL + + if symbols is None: + self.symbols = list(ordered(variables)) + else: + self.symbols = symbols + + self._initialize_variables(variables) + self._initialize_clauses(clauses) + + if 'vsids' == heuristic: + self._vsids_init() + self.heur_calculate = self._vsids_calculate + self.heur_lit_assigned = self._vsids_lit_assigned + self.heur_lit_unset = self._vsids_lit_unset + self.heur_clause_added = self._vsids_clause_added + + # Note: Uncomment this if/when clause learning is enabled + #self.update_functions.append(self._vsids_decay) + + else: + raise NotImplementedError + + if 'simple' == clause_learning: + self.add_learned_clause = self._simple_add_learned_clause + self.compute_conflict = self._simple_compute_conflict + self.update_functions.append(self._simple_clean_clauses) + elif 'none' == clause_learning: + self.add_learned_clause = lambda x: None + self.compute_conflict = lambda: None + else: + raise NotImplementedError + + # Create the base level + self.levels = [Level(0)] + self._current_level.varsettings = var_settings + + # Keep stats + self.num_decisions = 0 + self.num_learned_clauses = 0 + self.original_num_clauses = len(self.clauses) + + self.lra = lra_theory + + def _initialize_variables(self, variables): + """Set up the variable data structures needed.""" + self.sentinels = defaultdict(set) + self.occurrence_count = defaultdict(int) + self.variable_set = [False] * (len(variables) + 1) + + def _initialize_clauses(self, clauses): + """Set up the clause data structures needed. + + For each clause, the following changes are made: + - Unit clauses are queued for propagation right away. 
+ - Non-unit clauses have their first and last literals set as sentinels. + - The number of clauses a literal appears in is computed. + """ + self.clauses = [list(clause) for clause in clauses] + + for i, clause in enumerate(self.clauses): + + # Handle the unit clauses + if 1 == len(clause): + self._unit_prop_queue.append(clause[0]) + continue + + self.sentinels[clause[0]].add(i) + self.sentinels[clause[-1]].add(i) + + for lit in clause: + self.occurrence_count[lit] += 1 + + def _find_model(self): + """ + Main DPLL loop. Returns a generator of models. + + Variables are chosen successively, and assigned to be either + True or False. If a solution is not found with this setting, + the opposite is chosen and the search continues. The solver + halts when every variable has a setting. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + >>> list(l._find_model()) + [{1: True, 2: False, 3: False}, {1: True, 2: True, 3: True}] + + >>> from sympy.abc import A, B, C + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... 
{3, -2}], {1, 2, 3}, set(), [A, B, C]) + >>> list(l._find_model()) + [{A: True, B: False, C: False}, {A: True, B: True, C: True}] + + """ + + # We use this variable to keep track of if we should flip a + # variable setting in successive rounds + flip_var = False + + # Check if unit prop says the theory is unsat right off the bat + self._simplify() + if self.is_unsatisfied: + return + + # While the theory still has clauses remaining + while True: + # Perform cleanup / fixup at regular intervals + if self.num_decisions % self.INTERVAL == 0: + for func in self.update_functions: + func() + + if flip_var: + # We have just backtracked and we are trying to opposite literal + flip_var = False + lit = self._current_level.decision + + else: + # Pick a literal to set + lit = self.heur_calculate() + self.num_decisions += 1 + + # Stopping condition for a satisfying theory + if 0 == lit: + + # check if assignment satisfies lra theory + if self.lra: + for enc_var in self.var_settings: + res = self.lra.assert_lit(enc_var) + if res is not None: + break + res = self.lra.check() + self.lra.reset_bounds() + else: + res = None + if res is None or res[0]: + yield {self.symbols[abs(lit) - 1]: + lit > 0 for lit in self.var_settings} + else: + self._simple_add_learned_clause(res[1]) + + while self._current_level.flipped: + self._undo() + if len(self.levels) == 1: + return + flip_lit = -self._current_level.decision + self._undo() + self.levels.append(Level(flip_lit, flipped=True)) + flip_var = True + continue + + # Start the new decision level + self.levels.append(Level(lit)) + + # Assign the literal, updating the clauses it satisfies + self._assign_literal(lit) + + # _simplify the theory + self._simplify() + + # Check if we've made the theory unsat + if self.is_unsatisfied: + + self.is_unsatisfied = False + + # We unroll all of the decisions until we can flip a literal + while self._current_level.flipped: + self._undo() + + # If we've unrolled all the way, the theory is unsat + if 1 == 
len(self.levels): + return + + # Detect and add a learned clause + self.add_learned_clause(self.compute_conflict()) + + # Try the opposite setting of the most recent decision + flip_lit = -self._current_level.decision + self._undo() + self.levels.append(Level(flip_lit, flipped=True)) + flip_var = True + + ######################## + # Helper Methods # + ######################## + @property + def _current_level(self): + """The current decision level data structure + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{1}, {2}], {1, 2}, set()) + >>> next(l._find_model()) + {1: True, 2: True} + >>> l._current_level.decision + 0 + >>> l._current_level.flipped + False + >>> l._current_level.var_settings + {1, 2} + + """ + return self.levels[-1] + + def _clause_sat(self, cls): + """Check if a clause is satisfied by the current variable setting. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{1}, {-1}], {1}, set()) + >>> try: + ... next(l._find_model()) + ... except StopIteration: + ... pass + >>> l._clause_sat(0) + False + >>> l._clause_sat(1) + True + + """ + for lit in self.clauses[cls]: + if lit in self.var_settings: + return True + return False + + def _is_sentinel(self, lit, cls): + """Check if a literal is a sentinel of a given clause. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + >>> next(l._find_model()) + {1: True, 2: False, 3: False} + >>> l._is_sentinel(2, 3) + True + >>> l._is_sentinel(-3, 1) + False + + """ + return cls in self.sentinels[lit] + + def _assign_literal(self, lit): + """Make a literal assignment. + + The literal assignment must be recorded as part of the current + decision level. Additionally, if the literal is marked as a + sentinel of any clause, then a new sentinel must be chosen. 
If + this is not possible, then unit propagation is triggered and + another literal is added to the queue to be set in the future. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + >>> next(l._find_model()) + {1: True, 2: False, 3: False} + >>> l.var_settings + {-3, -2, 1} + + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + >>> l._assign_literal(-1) + >>> try: + ... next(l._find_model()) + ... except StopIteration: + ... pass + >>> l.var_settings + {-1} + + """ + self.var_settings.add(lit) + self._current_level.var_settings.add(lit) + self.variable_set[abs(lit)] = True + self.heur_lit_assigned(lit) + + sentinel_list = list(self.sentinels[-lit]) + + for cls in sentinel_list: + if not self._clause_sat(cls): + other_sentinel = None + for newlit in self.clauses[cls]: + if newlit != -lit: + if self._is_sentinel(newlit, cls): + other_sentinel = newlit + elif not self.variable_set[abs(newlit)]: + self.sentinels[-lit].remove(cls) + self.sentinels[newlit].add(cls) + other_sentinel = None + break + + # Check if no sentinel update exists + if other_sentinel: + self._unit_prop_queue.append(other_sentinel) + + def _undo(self): + """ + _undo the changes of the most recent decision level. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... 
{3, -2}], {1, 2, 3}, set()) + >>> next(l._find_model()) + {1: True, 2: False, 3: False} + >>> level = l._current_level + >>> level.decision, level.var_settings, level.flipped + (-3, {-3, -2}, False) + >>> l._undo() + >>> level = l._current_level + >>> level.decision, level.var_settings, level.flipped + (0, {1}, False) + + """ + # Undo the variable settings + for lit in self._current_level.var_settings: + self.var_settings.remove(lit) + self.heur_lit_unset(lit) + self.variable_set[abs(lit)] = False + + # Pop the level off the stack + self.levels.pop() + + ######################### + # Propagation # + ######################### + """ + Propagation methods should attempt to soundly simplify the boolean + theory, and return True if any simplification occurred and False + otherwise. + """ + def _simplify(self): + """Iterate over the various forms of propagation to simplify the theory. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... 
{3, -2}], {1, 2, 3}, set())
        >>> l.variable_set
        [False, False, False, False]
        >>> l.sentinels
        {-3: {0, 2}, -2: {3, 4}, 2: {0, 3}, 3: {2, 4}}

        >>> l._simplify()

        >>> l.variable_set
        [False, True, False, False]
        >>> l.sentinels
        {-3: {0, 2}, -2: {3, 4}, -1: set(), 2: {0, 3},
        ...3: {2, 4}}

        """
        # Run every propagation pass to a fixed point: stop only when a
        # full sweep reports that nothing changed.
        changed = True
        while changed:
            changed = False
            changed |= self._unit_prop()
            changed |= self._pure_literal()

    def _unit_prop(self):
        """Perform unit propagation on the current theory.

        Returns True if the queue held at least one literal to propagate,
        False otherwise. If a queued literal contradicts the current
        assignment, ``self.is_unsatisfied`` is set, the queue is cleared
        and False is returned.
        """
        result = len(self._unit_prop_queue) > 0
        while self._unit_prop_queue:
            next_lit = self._unit_prop_queue.pop()
            if -next_lit in self.var_settings:
                # The opposite literal is already assigned: conflict.
                self.is_unsatisfied = True
                self._unit_prop_queue = []
                return False
            else:
                self._assign_literal(next_lit)

        return result

    def _pure_literal(self):
        """Look for pure literals and assign them when found."""
        # Stub: always reports "no change"; no pure-literal elimination
        # is performed by this body.
        return False

    #########################
    #      Heuristics       #
    #########################
    def _vsids_init(self):
        """Initialize the data structures needed for the VSIDS heuristic."""
        # Scores are stored negated so that Python's min-heap pops the
        # literal with the largest occurrence count first.
        self.lit_heap = []
        self.lit_scores = {}

        for var in range(1, len(self.variable_set)):
            self.lit_scores[var] = float(-self.occurrence_count[var])
            self.lit_scores[-var] = float(-self.occurrence_count[-var])
            heappush(self.lit_heap, (self.lit_scores[var], var))
            heappush(self.lit_heap, (self.lit_scores[-var], -var))

    def _vsids_decay(self):
        """Decay the VSIDS scores for every literal.

        Examples
        ========

        >>> from sympy.logic.algorithms.dpll2 import SATSolver
        >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
        ...
{3, -2}], {1, 2, 3}, set()) + + >>> l.lit_scores + {-3: -2.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -2.0, 3: -2.0} + + >>> l._vsids_decay() + + >>> l.lit_scores + {-3: -1.0, -2: -1.0, -1: 0.0, 1: 0.0, 2: -1.0, 3: -1.0} + + """ + # We divide every literal score by 2 for a decay factor + # Note: This doesn't change the heap property + for lit in self.lit_scores.keys(): + self.lit_scores[lit] /= 2.0 + + def _vsids_calculate(self): + """ + VSIDS Heuristic Calculation + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + + >>> l.lit_heap + [(-2.0, -3), (-2.0, 2), (-2.0, -2), (0.0, 1), (-2.0, 3), (0.0, -1)] + + >>> l._vsids_calculate() + -3 + + >>> l.lit_heap + [(-2.0, -2), (-2.0, 2), (0.0, -1), (0.0, 1), (-2.0, 3)] + + """ + if len(self.lit_heap) == 0: + return 0 + + # Clean out the front of the heap as long the variables are set + while self.variable_set[abs(self.lit_heap[0][1])]: + heappop(self.lit_heap) + if len(self.lit_heap) == 0: + return 0 + + return heappop(self.lit_heap)[1] + + def _vsids_lit_assigned(self, lit): + """Handle the assignment of a literal for the VSIDS heuristic.""" + pass + + def _vsids_lit_unset(self, lit): + """Handle the unsetting of a literal for the VSIDS heuristic. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + >>> l.lit_heap + [(-2.0, -3), (-2.0, 2), (-2.0, -2), (0.0, 1), (-2.0, 3), (0.0, -1)] + + >>> l._vsids_lit_unset(2) + + >>> l.lit_heap + [(-2.0, -3), (-2.0, -2), (-2.0, -2), (-2.0, 2), (-2.0, 3), (0.0, -1), + ...(-2.0, 2), (0.0, 1)] + + """ + var = abs(lit) + heappush(self.lit_heap, (self.lit_scores[var], var)) + heappush(self.lit_heap, (self.lit_scores[-var], -var)) + + def _vsids_clause_added(self, cls): + """Handle the addition of a new clause for the VSIDS heuristic. 
+ + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + + >>> l.num_learned_clauses + 0 + >>> l.lit_scores + {-3: -2.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -2.0, 3: -2.0} + + >>> l._vsids_clause_added({2, -3}) + + >>> l.num_learned_clauses + 1 + >>> l.lit_scores + {-3: -1.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -1.0, 3: -2.0} + + """ + self.num_learned_clauses += 1 + for lit in cls: + self.lit_scores[lit] += 1 + + ######################## + # Clause Learning # + ######################## + def _simple_add_learned_clause(self, cls): + """Add a new clause to the theory. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... {3, -2}], {1, 2, 3}, set()) + + >>> l.num_learned_clauses + 0 + >>> l.clauses + [[2, -3], [1], [3, -3], [2, -2], [3, -2]] + >>> l.sentinels + {-3: {0, 2}, -2: {3, 4}, 2: {0, 3}, 3: {2, 4}} + + >>> l._simple_add_learned_clause([3]) + + >>> l.clauses + [[2, -3], [1], [3, -3], [2, -2], [3, -2], [3]] + >>> l.sentinels + {-3: {0, 2}, -2: {3, 4}, 2: {0, 3}, 3: {2, 4, 5}} + + """ + cls_num = len(self.clauses) + self.clauses.append(cls) + + for lit in cls: + self.occurrence_count[lit] += 1 + + self.sentinels[cls[0]].add(cls_num) + self.sentinels[cls[-1]].add(cls_num) + + self.heur_clause_added(cls) + + def _simple_compute_conflict(self): + """ Build a clause representing the fact that at least one decision made + so far is wrong. + + Examples + ======== + + >>> from sympy.logic.algorithms.dpll2 import SATSolver + >>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2}, + ... 
{3, -2}], {1, 2, 3}, set()) + >>> next(l._find_model()) + {1: True, 2: False, 3: False} + >>> l._simple_compute_conflict() + [3] + + """ + return [-(level.decision) for level in self.levels[1:]] + + def _simple_clean_clauses(self): + """Clean up learned clauses.""" + pass + + +class Level: + """ + Represents a single level in the DPLL algorithm, and contains + enough information for a sound backtracking procedure. + """ + + def __init__(self, decision, flipped=False): + self.decision = decision + self.var_settings = set() + self.flipped = flipped diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/lra_theory.py b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/lra_theory.py new file mode 100644 index 0000000000000000000000000000000000000000..6ddca1b573746e53ea811af624088c9989f62955 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/lra_theory.py @@ -0,0 +1,915 @@ +"""Implements "A Fast Linear-Arithmetic Solver for DPLL(T)" + +The LRASolver class defined in this file can be used +in conjunction with a SAT solver to check the +satisfiability of formulas involving inequalities. + +Here's an example of how that would work: + + Suppose you want to check the satisfiability of + the following formula: + + >>> from sympy.core.relational import Eq + >>> from sympy.abc import x, y + >>> f = ((x > 0) | (x < 0)) & (Eq(x, 0) | Eq(y, 1)) & (~Eq(y, 1) | Eq(1, 2)) + + First a preprocessing step should be done on f. During preprocessing, + f should be checked for any predicates such as `Q.prime` that can't be + handled. Also unequality like `~Eq(y, 1)` should be split. + + I should mention that the paper says to split both equalities and + unequality, but this implementation only requires that unequality + be split. + + >>> f = ((x > 0) | (x < 0)) & (Eq(x, 0) | Eq(y, 1)) & ((y < 1) | (y > 1) | Eq(1, 2)) + + Then an LRASolver instance needs to be initialized with this formula. 
+ + >>> from sympy.assumptions.cnf import CNF, EncodedCNF + >>> from sympy.assumptions.ask import Q + >>> from sympy.logic.algorithms.lra_theory import LRASolver + >>> cnf = CNF.from_prop(f) + >>> enc = EncodedCNF() + >>> enc.add_from_cnf(cnf) + >>> lra, conflicts = LRASolver.from_encoded_cnf(enc) + + Any immediate one-lital conflicts clauses will be detected here. + In this example, `~Eq(1, 2)` is one such conflict clause. We'll + want to add it to `f` so that the SAT solver is forced to + assign Eq(1, 2) to False. + + >>> f = f & ~Eq(1, 2) + + Now that the one-literal conflict clauses have been added + and an lra object has been initialized, we can pass `f` + to a SAT solver. The SAT solver will give us a satisfying + assignment such as: + + (1 = 2): False + (y = 1): True + (y < 1): True + (y > 1): True + (x = 0): True + (x < 0): True + (x > 0): True + + Next you would pass this assignment to the LRASolver + which will be able to determine that this particular + assignment is satisfiable or not. + + Note that since EncodedCNF is inherently non-deterministic, + the int each predicate is encoded as is not consistent. As a + result, the code bellow likely does not reflect the assignment + given above. + + >>> lra.assert_lit(-1) #doctest: +SKIP + >>> lra.assert_lit(2) #doctest: +SKIP + >>> lra.assert_lit(3) #doctest: +SKIP + >>> lra.assert_lit(4) #doctest: +SKIP + >>> lra.assert_lit(5) #doctest: +SKIP + >>> lra.assert_lit(6) #doctest: +SKIP + >>> lra.assert_lit(7) #doctest: +SKIP + >>> is_sat, conflict_or_assignment = lra.check() + + As the particular assignment suggested is not satisfiable, + the LRASolver will return unsat and a conflict clause when + given that assignment. The conflict clause will always be + minimal, but there can be multiple minimal conflict clauses. + One possible conflict clause could be `~(x < 0) | ~(x > 0)`. 
+ + We would then add whatever conflict clause is given to + `f` to prevent the SAT solver from coming up with an + assignment with the same conflicting literals. In this case, + the conflict clause `~(x < 0) | ~(x > 0)` would prevent + any assignment where both (x < 0) and (x > 0) were both + true. + + The SAT solver would then find another assignment + and we would check that assignment with the LRASolver + and so on. Eventually either a satisfying assignment + that the SAT solver and LRASolver agreed on would be found + or enough conflict clauses would be added so that the + boolean formula was unsatisfiable. + + +This implementation is based on [1]_, which includes a +detailed explanation of the algorithm and pseudocode +for the most important functions. + +[1]_ also explains how backtracking and theory propagation +could be implemented to speed up the current implementation, +but these are not currently implemented. + +TODO: + - Handle non-rational real numbers + - Handle positive and negative infinity + - Implement backtracking and theory proposition + - Simplify matrix by removing unused variables using Gaussian elimination + +References +========== + +.. [1] Dutertre, B., de Moura, L.: + A Fast Linear-Arithmetic Solver for DPLL(T) + https://link.springer.com/chapter/10.1007/11817963_11 +""" +from sympy.solvers.solveset import linear_eq_to_matrix +from sympy.matrices.dense import eye +from sympy.assumptions import Predicate +from sympy.assumptions.assume import AppliedPredicate +from sympy.assumptions.ask import Q +from sympy.core import Dummy +from sympy.core.mul import Mul +from sympy.core.add import Add +from sympy.core.relational import Eq, Ne +from sympy.core.sympify import sympify +from sympy.core.singleton import S +from sympy.core.numbers import Rational, oo +from sympy.matrices.dense import Matrix + +class UnhandledInput(Exception): + """ + Raised while creating an LRASolver if non-linearity + or non-rational numbers are present. 
+ """ + +# predicates that LRASolver understands and makes use of +ALLOWED_PRED = {Q.eq, Q.gt, Q.lt, Q.le, Q.ge} + +# if true ~Q.gt(x, y) implies Q.le(x, y) +HANDLE_NEGATION = True + +class LRASolver(): + """ + Linear Arithmetic Solver for DPLL(T) implemented with an algorithm based on + the Dual Simplex method. Uses Bland's pivoting rule to avoid cycling. + + References + ========== + + .. [1] Dutertre, B., de Moura, L.: + A Fast Linear-Arithmetic Solver for DPLL(T) + https://link.springer.com/chapter/10.1007/11817963_11 + """ + + def __init__(self, A, slack_variables, nonslack_variables, enc_to_boundary, s_subs, testing_mode): + """ + Use the "from_encoded_cnf" method to create a new LRASolver. + """ + self.run_checks = testing_mode + self.s_subs = s_subs # used only for test_lra_theory.test_random_problems + + if any(not isinstance(a, Rational) for a in A): + raise UnhandledInput("Non-rational numbers are not handled") + if any(not isinstance(b.bound, Rational) for b in enc_to_boundary.values()): + raise UnhandledInput("Non-rational numbers are not handled") + m, n = len(slack_variables), len(slack_variables)+len(nonslack_variables) + if m != 0: + assert A.shape == (m, n) + if self.run_checks: + assert A[:, n-m:] == -eye(m) + + self.enc_to_boundary = enc_to_boundary # mapping of int to Boundry objects + self.boundary_to_enc = {value: key for key, value in enc_to_boundary.items()} + self.A = A + self.slack = slack_variables + self.nonslack = nonslack_variables + self.all_var = nonslack_variables + slack_variables + + self.slack_set = set(slack_variables) + + self.is_sat = True # While True, all constraints asserted so far are satisfiable + self.result = None # always one of: (True, assignment), (False, conflict clause), None + + @staticmethod + def from_encoded_cnf(encoded_cnf, testing_mode=False): + """ + Creates an LRASolver from an EncodedCNF object + and a list of conflict clauses for propositions + that can be simplified to True or False. 
+ + Parameters + ========== + + encoded_cnf : EncodedCNF + + testing_mode : bool + Setting testing_mode to True enables some slow assert statements + and sorting to reduce nonterministic behavior. + + Returns + ======= + + (lra, conflicts) + + lra : LRASolver + + conflicts : list + Contains a one-literal conflict clause for each proposition + that can be simplified to True or False. + + Example + ======= + + >>> from sympy.core.relational import Eq + >>> from sympy.assumptions.cnf import CNF, EncodedCNF + >>> from sympy.assumptions.ask import Q + >>> from sympy.logic.algorithms.lra_theory import LRASolver + >>> from sympy.abc import x, y, z + >>> phi = (x >= 0) & ((x + y <= 2) | (x + 2 * y - z >= 6)) + >>> phi = phi & (Eq(x + y, 2) | (x + 2 * y - z > 4)) + >>> phi = phi & Q.gt(2, 1) + >>> cnf = CNF.from_prop(phi) + >>> enc = EncodedCNF() + >>> enc.from_cnf(cnf) + >>> lra, conflicts = LRASolver.from_encoded_cnf(enc, testing_mode=True) + >>> lra #doctest: +SKIP + + >>> conflicts #doctest: +SKIP + [[4]] + """ + # This function has three main jobs: + # - raise errors if the input formula is not handled + # - preprocesses the formula into a matirx and single variable constraints + # - create one-literal conflict clauses from predicates that are always True + # or always False such as Q.gt(3, 2) + # + # See the preprocessing section of "A Fast Linear-Arithmetic Solver for DPLL(T)" + # for an explanation of how the formula is converted into a matrix + # and a set of single variable constraints. 
+ + encoding = {} # maps int to boundary + A = [] + + basic = [] + s_count = 0 + s_subs = {} + nonbasic = [] + + if testing_mode: + # sort to reduce nondeterminism + encoded_cnf_items = sorted(encoded_cnf.encoding.items(), key=lambda x: str(x)) + else: + encoded_cnf_items = encoded_cnf.encoding.items() + + empty_var = Dummy() + var_to_lra_var = {} + conflicts = [] + + for prop, enc in encoded_cnf_items: + if isinstance(prop, Predicate): + prop = prop(empty_var) + if not isinstance(prop, AppliedPredicate): + if prop == True: + conflicts.append([enc]) + continue + if prop == False: + conflicts.append([-enc]) + continue + + raise ValueError(f"Unhandled Predicate: {prop}") + + assert prop.function in ALLOWED_PRED + if prop.lhs == S.NaN or prop.rhs == S.NaN: + raise ValueError(f"{prop} contains nan") + if prop.lhs.is_imaginary or prop.rhs.is_imaginary: + raise UnhandledInput(f"{prop} contains an imaginary component") + if prop.lhs == oo or prop.rhs == oo: + raise UnhandledInput(f"{prop} contains infinity") + + prop = _eval_binrel(prop) # simplify variable-less quantities to True / False if possible + if prop == True: + conflicts.append([enc]) + continue + elif prop == False: + conflicts.append([-enc]) + continue + elif prop is None: + raise UnhandledInput(f"{prop} could not be simplified") + + expr = prop.lhs - prop.rhs + if prop.function in [Q.ge, Q.gt]: + expr = -expr + + # expr should be less than (or equal to) 0 + # otherwise prop is False + if prop.function in [Q.le, Q.ge]: + bool = (expr <= 0) + elif prop.function in [Q.lt, Q.gt]: + bool = (expr < 0) + else: + assert prop.function == Q.eq + bool = Eq(expr, 0) + + if bool == True: + conflicts.append([enc]) + continue + elif bool == False: + conflicts.append([-enc]) + continue + + + vars, const = _sep_const_terms(expr) # example: (2x + 3y + 2) --> (2x + 3y), (2) + vars, var_coeff = _sep_const_coeff(vars) # examples: (2x) --> (x, 2); (2x + 3y) --> (2x + 3y), (1) + const = const / var_coeff + + terms = 
_list_terms(vars) # example: (2x + 3y) --> [2x, 3y] + for term in terms: + term, _ = _sep_const_coeff(term) + assert len(term.free_symbols) > 0 + if term not in var_to_lra_var: + var_to_lra_var[term] = LRAVariable(term) + nonbasic.append(term) + + if len(terms) > 1: + if vars not in s_subs: + s_count += 1 + d = Dummy(f"s{s_count}") + var_to_lra_var[d] = LRAVariable(d) + basic.append(d) + s_subs[vars] = d + A.append(vars - d) + var = s_subs[vars] + else: + var = terms[0] + + assert var_coeff != 0 + + equality = prop.function == Q.eq + upper = var_coeff > 0 if not equality else None + strict = prop.function in [Q.gt, Q.lt] + b = Boundary(var_to_lra_var[var], -const, upper, equality, strict) + encoding[enc] = b + + fs = [v.free_symbols for v in nonbasic + basic] + assert all(len(syms) > 0 for syms in fs) + fs_count = sum(len(syms) for syms in fs) + if len(fs) > 0 and len(set.union(*fs)) < fs_count: + raise UnhandledInput("Nonlinearity is not handled") + + A, _ = linear_eq_to_matrix(A, nonbasic + basic) + nonbasic = [var_to_lra_var[nb] for nb in nonbasic] + basic = [var_to_lra_var[b] for b in basic] + for idx, var in enumerate(nonbasic + basic): + var.col_idx = idx + + return LRASolver(A, basic, nonbasic, encoding, s_subs, testing_mode), conflicts + + def reset_bounds(self): + """ + Resets the state of the LRASolver to before + anything was asserted. + """ + self.result = None + for var in self.all_var: + var.lower = LRARational(-float("inf"), 0) + var.lower_from_eq = False + var.lower_from_neg = False + var.upper = LRARational(float("inf"), 0) + var.upper_from_eq= False + var.lower_from_neg = False + var.assign = LRARational(0, 0) + + def assert_lit(self, enc_constraint): + """ + Assert a literal representing a constraint + and update the internal state accordingly. + + Note that due to peculiarities of this implementation + asserting ~(x > 0) will assert (x <= 0) but asserting + ~Eq(x, 0) will not do anything. 
+ + Parameters + ========== + + enc_constraint : int + A mapping of encodings to constraints + can be found in `self.enc_to_boundary`. + + Returns + ======= + + None or (False, explanation) + + explanation : set of ints + A conflict clause that "explains" why + the literals asserted so far are unsatisfiable. + """ + if abs(enc_constraint) not in self.enc_to_boundary: + return None + + if not HANDLE_NEGATION and enc_constraint < 0: + return None + + boundary = self.enc_to_boundary[abs(enc_constraint)] + sym, c, negated = boundary.var, boundary.bound, enc_constraint < 0 + + if boundary.equality and negated: + return None # negated equality is not handled and should only appear in conflict clauses + + upper = boundary.upper != negated + if boundary.strict != negated: + delta = -1 if upper else 1 + c = LRARational(c, delta) + else: + c = LRARational(c, 0) + + if boundary.equality: + res1 = self._assert_lower(sym, c, from_equality=True, from_neg=negated) + if res1 and res1[0] == False: + res = res1 + else: + res2 = self._assert_upper(sym, c, from_equality=True, from_neg=negated) + res = res2 + elif upper: + res = self._assert_upper(sym, c, from_neg=negated) + else: + res = self._assert_lower(sym, c, from_neg=negated) + + if self.is_sat and sym not in self.slack_set: + self.is_sat = res is None + else: + self.is_sat = False + + return res + + def _assert_upper(self, xi, ci, from_equality=False, from_neg=False): + """ + Adjusts the upper bound on variable xi if the new upper bound is + more limiting. The assignment of variable xi is adjusted to be + within the new bound if needed. + + Also calls `self._update` to update the assignment for slack variables + to keep all equalities satisfied. 
+ """ + if self.result: + assert self.result[0] != False + self.result = None + if ci >= xi.upper: + return None + if ci < xi.lower: + assert (xi.lower[1] >= 0) is True + assert (ci[1] <= 0) is True + + lit1, neg1 = Boundary.from_lower(xi) + + lit2 = Boundary(var=xi, const=ci[0], strict=ci[1] != 0, upper=True, equality=from_equality) + if from_neg: + lit2 = lit2.get_negated() + neg2 = -1 if from_neg else 1 + + conflict = [-neg1*self.boundary_to_enc[lit1], -neg2*self.boundary_to_enc[lit2]] + self.result = False, conflict + return self.result + xi.upper = ci + xi.upper_from_eq = from_equality + xi.upper_from_neg = from_neg + if xi in self.nonslack and xi.assign > ci: + self._update(xi, ci) + + if self.run_checks and all(v.assign[0] != float("inf") and v.assign[0] != -float("inf") + for v in self.all_var): + M = self.A + X = Matrix([v.assign[0] for v in self.all_var]) + assert all(abs(val) < 10 ** (-10) for val in M * X) + + return None + + def _assert_lower(self, xi, ci, from_equality=False, from_neg=False): + """ + Adjusts the lower bound on variable xi if the new lower bound is + more limiting. The assignment of variable xi is adjusted to be + within the new bound if needed. + + Also calls `self._update` to update the assignment for slack variables + to keep all equalities satisfied. 
+ """ + if self.result: + assert self.result[0] != False + self.result = None + if ci <= xi.lower: + return None + if ci > xi.upper: + assert (xi.upper[1] <= 0) is True + assert (ci[1] >= 0) is True + + lit1, neg1 = Boundary.from_upper(xi) + + lit2 = Boundary(var=xi, const=ci[0], strict=ci[1] != 0, upper=False, equality=from_equality) + if from_neg: + lit2 = lit2.get_negated() + neg2 = -1 if from_neg else 1 + + conflict = [-neg1*self.boundary_to_enc[lit1],-neg2*self.boundary_to_enc[lit2]] + self.result = False, conflict + return self.result + xi.lower = ci + xi.lower_from_eq = from_equality + xi.lower_from_neg = from_neg + if xi in self.nonslack and xi.assign < ci: + self._update(xi, ci) + + if self.run_checks and all(v.assign[0] != float("inf") and v.assign[0] != -float("inf") + for v in self.all_var): + M = self.A + X = Matrix([v.assign[0] for v in self.all_var]) + assert all(abs(val) < 10 ** (-10) for val in M * X) + + return None + + def _update(self, xi, v): + """ + Updates all slack variables that have equations that contain + variable xi so that they stay satisfied given xi is equal to v. + """ + i = xi.col_idx + for j, b in enumerate(self.slack): + aji = self.A[j, i] + b.assign = b.assign + (v - xi.assign)*aji + xi.assign = v + + def check(self): + """ + Searches for an assignment that satisfies all constraints + or determines that no such assignment exists and gives + a minimal conflict clause that "explains" why the + constraints are unsatisfiable. + + Returns + ======= + + (True, assignment) or (False, explanation) + + assignment : dict of LRAVariables to values + Assigned values are tuples that represent a rational number + plus some infinatesimal delta. 
+ + explanation : set of ints + """ + if self.is_sat: + return True, {var: var.assign for var in self.all_var} + if self.result: + return self.result + + from sympy.matrices.dense import Matrix + M = self.A.copy() + basic = {s: i for i, s in enumerate(self.slack)} # contains the row index associated with each basic variable + nonbasic = set(self.nonslack) + iteration = 0 + while True: + iteration += 1 + + if self.run_checks: + # nonbasic variables must always be within bounds + assert all(((nb.assign >= nb.lower) == True) and ((nb.assign <= nb.upper) == True) for nb in nonbasic) + + # assignments for x must always satisfy Ax = 0 + # probably have to turn this off when dealing with strict ineq + if all(v.assign[0] != float("inf") and v.assign[0] != -float("inf") + for v in self.all_var): + X = Matrix([v.assign[0] for v in self.all_var]) + assert all(abs(val) < 10**(-10) for val in M*X) + + # check upper and lower match this format: + # x <= rat + delta iff x < rat + # x >= rat - delta iff x > rat + # this wouldn't make sense: + # x <= rat - delta + # x >= rat + delta + assert all(x.upper[1] <= 0 for x in self.all_var) + assert all(x.lower[1] >= 0 for x in self.all_var) + + cand = [b for b in basic if b.assign < b.lower or b.assign > b.upper] + + if len(cand) == 0: + return True, {var: var.assign for var in self.all_var} + + xi = min(cand, key=lambda v: v.col_idx) # Bland's rule + i = basic[xi] + + if xi.assign < xi.lower: + cand = [nb for nb in nonbasic + if (M[i, nb.col_idx] > 0 and nb.assign < nb.upper) + or (M[i, nb.col_idx] < 0 and nb.assign > nb.lower)] + if len(cand) == 0: + N_plus = [nb for nb in nonbasic if M[i, nb.col_idx] > 0] + N_minus = [nb for nb in nonbasic if M[i, nb.col_idx] < 0] + + conflict = [] + conflict += [Boundary.from_upper(nb) for nb in N_plus] + conflict += [Boundary.from_lower(nb) for nb in N_minus] + conflict.append(Boundary.from_lower(xi)) + conflict = [-neg*self.boundary_to_enc[c] for c, neg in conflict] + return False, conflict + xj = 
min(cand, key=str) + M = self._pivot_and_update(M, basic, nonbasic, xi, xj, xi.lower) + + if xi.assign > xi.upper: + cand = [nb for nb in nonbasic + if (M[i, nb.col_idx] < 0 and nb.assign < nb.upper) + or (M[i, nb.col_idx] > 0 and nb.assign > nb.lower)] + + if len(cand) == 0: + N_plus = [nb for nb in nonbasic if M[i, nb.col_idx] > 0] + N_minus = [nb for nb in nonbasic if M[i, nb.col_idx] < 0] + + conflict = [] + conflict += [Boundary.from_upper(nb) for nb in N_minus] + conflict += [Boundary.from_lower(nb) for nb in N_plus] + conflict.append(Boundary.from_upper(xi)) + + conflict = [-neg*self.boundary_to_enc[c] for c, neg in conflict] + return False, conflict + xj = min(cand, key=lambda v: v.col_idx) + M = self._pivot_and_update(M, basic, nonbasic, xi, xj, xi.upper) + + def _pivot_and_update(self, M, basic, nonbasic, xi, xj, v): + """ + Pivots basic variable xi with nonbasic variable xj, + and sets value of xi to v and adjusts the values of all basic variables + to keep equations satisfied. + """ + i, j = basic[xi], xj.col_idx + assert M[i, j] != 0 + theta = (v - xi.assign)*(1/M[i, j]) + xi.assign = v + xj.assign = xj.assign + theta + for xk in basic: + if xk != xi: + k = basic[xk] + akj = M[k, j] + xk.assign = xk.assign + theta*akj + # pivot + basic[xj] = basic[xi] + del basic[xi] + nonbasic.add(xi) + nonbasic.remove(xj) + return self._pivot(M, i, j) + + @staticmethod + def _pivot(M, i, j): + """ + Performs a pivot operation about entry i, j of M by performing + a series of row operations on a copy of M and returing the result. + The original M is left unmodified. + + Conceptually, M represents a system of equations and pivoting + can be thought of as rearranging equation i to be in terms of + variable j and then substituting in the rest of the equations + to get rid of other occurances of variable j. 
+ + Example + ======= + + >>> from sympy.matrices.dense import Matrix + >>> from sympy.logic.algorithms.lra_theory import LRASolver + >>> from sympy import var + >>> Matrix(3, 3, var('a:i')) + Matrix([ + [a, b, c], + [d, e, f], + [g, h, i]]) + + This matrix is equivalent to: + 0 = a*x + b*y + c*z + 0 = d*x + e*y + f*z + 0 = g*x + h*y + i*z + + >>> LRASolver._pivot(_, 1, 0) + Matrix([ + [ 0, -a*e/d + b, -a*f/d + c], + [-1, -e/d, -f/d], + [ 0, h - e*g/d, i - f*g/d]]) + + We rearrange equation 1 in terms of variable 0 (x) + and substitute to remove x from the other equations. + + 0 = 0 + (-a*e/d + b)*y + (-a*f/d + c)*z + 0 = -x + (-e/d)*y + (-f/d)*z + 0 = 0 + (h - e*g/d)*y + (i - f*g/d)*z + """ + _, _, Mij = M[i, :], M[:, j], M[i, j] + if Mij == 0: + raise ZeroDivisionError("Tried to pivot about zero-valued entry.") + A = M.copy() + A[i, :] = -A[i, :]/Mij + for row in range(M.shape[0]): + if row != i: + A[row, :] = A[row, :] + A[row, j] * A[i, :] + + return A + + +def _sep_const_coeff(expr): + """ + Example + ======= + + >>> from sympy.logic.algorithms.lra_theory import _sep_const_coeff + >>> from sympy.abc import x, y + >>> _sep_const_coeff(2*x) + (x, 2) + >>> _sep_const_coeff(2*x + 3*y) + (2*x + 3*y, 1) + """ + if isinstance(expr, Add): + return expr, sympify(1) + + if isinstance(expr, Mul): + coeffs = expr.args + else: + coeffs = [expr] + + var, const = [], [] + for c in coeffs: + c = sympify(c) + if len(c.free_symbols)==0: + const.append(c) + else: + var.append(c) + return Mul(*var), Mul(*const) + + +def _list_terms(expr): + if not isinstance(expr, Add): + return [expr] + + return expr.args + + +def _sep_const_terms(expr): + """ + Example + ======= + + >>> from sympy.logic.algorithms.lra_theory import _sep_const_terms + >>> from sympy.abc import x, y + >>> _sep_const_terms(2*x + 3*y + 2) + (2*x + 3*y, 2) + """ + if isinstance(expr, Add): + terms = expr.args + else: + terms = [expr] + + var, const = [], [] + for t in terms: + if len(t.free_symbols) == 0: + 
const.append(t) + else: + var.append(t) + return sum(var), sum(const) + + +def _eval_binrel(binrel): + """ + Simplify binary relation to True / False if possible. + """ + if not (len(binrel.lhs.free_symbols) == 0 and len(binrel.rhs.free_symbols) == 0): + return binrel + if binrel.function == Q.lt: + res = binrel.lhs < binrel.rhs + elif binrel.function == Q.gt: + res = binrel.lhs > binrel.rhs + elif binrel.function == Q.le: + res = binrel.lhs <= binrel.rhs + elif binrel.function == Q.ge: + res = binrel.lhs >= binrel.rhs + elif binrel.function == Q.eq: + res = Eq(binrel.lhs, binrel.rhs) + elif binrel.function == Q.ne: + res = Ne(binrel.lhs, binrel.rhs) + + if res == True or res == False: + return res + else: + return None + + +class Boundary: + """ + Represents an upper or lower bound or an equality between a symbol + and some constant. + """ + def __init__(self, var, const, upper, equality, strict=None): + if not equality in [True, False]: + assert equality in [True, False] + + + self.var = var + if isinstance(const, tuple): + s = const[1] != 0 + if strict: + assert s == strict + self.bound = const[0] + self.strict = s + else: + self.bound = const + self.strict = strict + self.upper = upper if not equality else None + self.equality = equality + self.strict = strict + assert self.strict is not None + + @staticmethod + def from_upper(var): + neg = -1 if var.upper_from_neg else 1 + b = Boundary(var, var.upper[0], True, var.upper_from_eq, var.upper[1] != 0) + if neg < 0: + b = b.get_negated() + return b, neg + + @staticmethod + def from_lower(var): + neg = -1 if var.lower_from_neg else 1 + b = Boundary(var, var.lower[0], False, var.lower_from_eq, var.lower[1] != 0) + if neg < 0: + b = b.get_negated() + return b, neg + + def get_negated(self): + return Boundary(self.var, self.bound, not self.upper, self.equality, not self.strict) + + def get_inequality(self): + if self.equality: + return Eq(self.var.var, self.bound) + elif self.upper and self.strict: + return 
self.var.var < self.bound + elif not self.upper and self.strict: + return self.var.var > self.bound + elif self.upper: + return self.var.var <= self.bound + else: + return self.var.var >= self.bound + + def __repr__(self): + return repr("Boundry(" + repr(self.get_inequality()) + ")") + + def __eq__(self, other): + other = (other.var, other.bound, other.strict, other.upper, other.equality) + return (self.var, self.bound, self.strict, self.upper, self.equality) == other + + def __hash__(self): + return hash((self.var, self.bound, self.strict, self.upper, self.equality)) + + +class LRARational(): + """ + Represents a rational plus or minus some amount + of arbitrary small deltas. + """ + def __init__(self, rational, delta): + self.value = (rational, delta) + + def __lt__(self, other): + return self.value < other.value + + def __le__(self, other): + return self.value <= other.value + + def __eq__(self, other): + return self.value == other.value + + def __add__(self, other): + return LRARational(self.value[0] + other.value[0], self.value[1] + other.value[1]) + + def __sub__(self, other): + return LRARational(self.value[0] - other.value[0], self.value[1] - other.value[1]) + + def __mul__(self, other): + assert not isinstance(other, LRARational) + return LRARational(self.value[0] * other, self.value[1] * other) + + def __getitem__(self, index): + return self.value[index] + + def __repr__(self): + return repr(self.value) + + +class LRAVariable(): + """ + Object to keep track of upper and lower bounds + on `self.var`. 
+ """ + def __init__(self, var): + self.upper = LRARational(float("inf"), 0) + self.upper_from_eq = False + self.upper_from_neg = False + self.lower = LRARational(-float("inf"), 0) + self.lower_from_eq = False + self.lower_from_neg = False + self.assign = LRARational(0,0) + self.var = var + self.col_idx = None + + def __repr__(self): + return repr(self.var) + + def __eq__(self, other): + if not isinstance(other, LRAVariable): + return False + return other.var == self.var + + def __hash__(self): + return hash(self.var) diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/minisat22_wrapper.py b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/minisat22_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..1d5c1f8f14f04309f7cb8197cc05d01a3c108545 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/minisat22_wrapper.py @@ -0,0 +1,46 @@ +from sympy.assumptions.cnf import EncodedCNF + +def minisat22_satisfiable(expr, all_models=False, minimal=False): + + if not isinstance(expr, EncodedCNF): + exprs = EncodedCNF() + exprs.add_prop(expr) + expr = exprs + + from pysat.solvers import Minisat22 + + # Return UNSAT when False (encoded as 0) is present in the CNF + if {0} in expr.data: + if all_models: + return (f for f in [False]) + return False + + r = Minisat22(expr.data) + + if minimal: + r.set_phases([-(i+1) for i in range(r.nof_vars())]) + + if not r.solve(): + return False + + if not all_models: + return {expr.symbols[abs(lit) - 1]: lit > 0 for lit in r.get_model()} + + else: + # Make solutions SymPy compatible by creating a generator + def _gen(results): + satisfiable = False + while results.solve(): + sol = results.get_model() + yield {expr.symbols[abs(lit) - 1]: lit > 0 for lit in sol} + if minimal: + results.add_clause([-i for i in sol if i>0]) + else: + results.add_clause([-i for i in sol]) + satisfiable = True + if not satisfiable: + yield False + raise StopIteration + + + return _gen(r) 
diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/pycosat_wrapper.py b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/pycosat_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..5ff498b7e3f6b73d95e9b949598ef32df4ecf226 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/logic/algorithms/pycosat_wrapper.py @@ -0,0 +1,41 @@ +from sympy.assumptions.cnf import EncodedCNF + + +def pycosat_satisfiable(expr, all_models=False): + import pycosat + if not isinstance(expr, EncodedCNF): + exprs = EncodedCNF() + exprs.add_prop(expr) + expr = exprs + + # Return UNSAT when False (encoded as 0) is present in the CNF + if {0} in expr.data: + if all_models: + return (f for f in [False]) + return False + + if not all_models: + r = pycosat.solve(expr.data) + result = (r != "UNSAT") + if not result: + return result + return {expr.symbols[abs(lit) - 1]: lit > 0 for lit in r} + else: + r = pycosat.itersolve(expr.data) + result = (r != "UNSAT") + if not result: + return result + + # Make solutions SymPy compatible by creating a generator + def _gen(results): + satisfiable = False + try: + while True: + sol = next(results) + yield {expr.symbols[abs(lit) - 1]: lit > 0 for lit in sol} + satisfiable = True + except StopIteration: + if not satisfiable: + yield False + + return _gen(r) diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/tests/__init__.py b/vllm/lib/python3.10/site-packages/sympy/logic/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_boolalg.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_boolalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5258b7400115ca70c425c84530103e84f7f6568c Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_boolalg.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_dimacs.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_dimacs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e2b8b219d53a9ba1d311e62bbdefea94df97fa1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_dimacs.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_lra_theory.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_lra_theory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..219595608a3705ccce24bd915bd5cb3ce13c16ab Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/logic/tests/__pycache__/test_lra_theory.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_boolalg.py b/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_boolalg.py new file mode 100644 index 0000000000000000000000000000000000000000..53de73f190caa663d790acc0d9534a2ea2824404 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_boolalg.py @@ -0,0 +1,1352 @@ +from sympy.assumptions.ask import Q +from sympy.assumptions.refine import refine +from sympy.core.numbers import oo +from sympy.core.relational import Equality, Eq, Ne +from sympy.core.singleton import S +from sympy.core.symbol import (Dummy, symbols) +from sympy.functions import Piecewise +from sympy.functions.elementary.trigonometric import cos, sin +from sympy.sets.sets import Interval, Union +from sympy.sets.contains import Contains +from sympy.simplify.simplify import simplify +from sympy.logic.boolalg import ( + And, Boolean, Equivalent, ITE, Implies, Nand, Nor, Not, Or, + POSform, SOPform, Xor, Xnor, conjuncts, disjuncts, + 
distribute_or_over_and, distribute_and_over_or, + eliminate_implications, is_nnf, is_cnf, is_dnf, simplify_logic, + to_nnf, to_cnf, to_dnf, to_int_repr, bool_map, true, false, + BooleanAtom, is_literal, term_to_integer, + truth_table, as_Boolean, to_anf, is_anf, distribute_xor_over_and, + anf_coeffs, ANFform, bool_minterm, bool_maxterm, bool_monomial, + _check_pair, _convert_to_varsSOP, _convert_to_varsPOS, Exclusive, + gateinputcount) +from sympy.assumptions.cnf import CNF + +from sympy.testing.pytest import raises, XFAIL, slow + +from itertools import combinations, permutations, product + +A, B, C, D = symbols('A:D') +a, b, c, d, e, w, x, y, z = symbols('a:e w:z') + + +def test_overloading(): + """Test that |, & are overloaded as expected""" + + assert A & B == And(A, B) + assert A | B == Or(A, B) + assert (A & B) | C == Or(And(A, B), C) + assert A >> B == Implies(A, B) + assert A << B == Implies(B, A) + assert ~A == Not(A) + assert A ^ B == Xor(A, B) + + +def test_And(): + assert And() is true + assert And(A) == A + assert And(True) is true + assert And(False) is false + assert And(True, True) is true + assert And(True, False) is false + assert And(False, False) is false + assert And(True, A) == A + assert And(False, A) is false + assert And(True, True, True) is true + assert And(True, True, A) == A + assert And(True, False, A) is false + assert And(1, A) == A + raises(TypeError, lambda: And(2, A)) + assert And(A < 1, A >= 1) is false + e = A > 1 + assert And(e, e.canonical) == e.canonical + g, l, ge, le = A > B, B < A, A >= B, B <= A + assert And(g, l, ge, le) == And(ge, g) + assert {And(*i) for i in permutations((l,g,le,ge))} == {And(ge, g)} + assert And(And(Eq(a, 0), Eq(b, 0)), And(Ne(a, 0), Eq(c, 0))) is false + + +def test_Or(): + assert Or() is false + assert Or(A) == A + assert Or(True) is true + assert Or(False) is false + assert Or(True, True) is true + assert Or(True, False) is true + assert Or(False, False) is false + assert Or(True, A) is true + 
assert Or(False, A) == A + assert Or(True, False, False) is true + assert Or(True, False, A) is true + assert Or(False, False, A) == A + assert Or(1, A) is true + raises(TypeError, lambda: Or(2, A)) + assert Or(A < 1, A >= 1) is true + e = A > 1 + assert Or(e, e.canonical) == e + g, l, ge, le = A > B, B < A, A >= B, B <= A + assert Or(g, l, ge, le) == Or(g, ge) + + +def test_Xor(): + assert Xor() is false + assert Xor(A) == A + assert Xor(A, A) is false + assert Xor(True, A, A) is true + assert Xor(A, A, A, A, A) == A + assert Xor(True, False, False, A, B) == ~Xor(A, B) + assert Xor(True) is true + assert Xor(False) is false + assert Xor(True, True) is false + assert Xor(True, False) is true + assert Xor(False, False) is false + assert Xor(True, A) == ~A + assert Xor(False, A) == A + assert Xor(True, False, False) is true + assert Xor(True, False, A) == ~A + assert Xor(False, False, A) == A + assert isinstance(Xor(A, B), Xor) + assert Xor(A, B, Xor(C, D)) == Xor(A, B, C, D) + assert Xor(A, B, Xor(B, C)) == Xor(A, C) + assert Xor(A < 1, A >= 1, B) == Xor(0, 1, B) == Xor(1, 0, B) + e = A > 1 + assert Xor(e, e.canonical) == Xor(0, 0) == Xor(1, 1) + + +def test_rewrite_as_And(): + expr = x ^ y + assert expr.rewrite(And) == (x | y) & (~x | ~y) + + +def test_rewrite_as_Or(): + expr = x ^ y + assert expr.rewrite(Or) == (x & ~y) | (y & ~x) + + +def test_rewrite_as_Nand(): + expr = (y & z) | (z & ~w) + assert expr.rewrite(Nand) == ~(~(y & z) & ~(z & ~w)) + + +def test_rewrite_as_Nor(): + expr = z & (y | ~w) + assert expr.rewrite(Nor) == ~(~z | ~(y | ~w)) + + +def test_Not(): + raises(TypeError, lambda: Not(True, False)) + assert Not(True) is false + assert Not(False) is true + assert Not(0) is true + assert Not(1) is false + assert Not(2) is false + + +def test_Nand(): + assert Nand() is false + assert Nand(A) == ~A + assert Nand(True) is false + assert Nand(False) is true + assert Nand(True, True) is false + assert Nand(True, False) is true + assert Nand(False, False) is 
true + assert Nand(True, A) == ~A + assert Nand(False, A) is true + assert Nand(True, True, True) is false + assert Nand(True, True, A) == ~A + assert Nand(True, False, A) is true + + +def test_Nor(): + assert Nor() is true + assert Nor(A) == ~A + assert Nor(True) is false + assert Nor(False) is true + assert Nor(True, True) is false + assert Nor(True, False) is false + assert Nor(False, False) is true + assert Nor(True, A) is false + assert Nor(False, A) == ~A + assert Nor(True, True, True) is false + assert Nor(True, True, A) is false + assert Nor(True, False, A) is false + + +def test_Xnor(): + assert Xnor() is true + assert Xnor(A) == ~A + assert Xnor(A, A) is true + assert Xnor(True, A, A) is false + assert Xnor(A, A, A, A, A) == ~A + assert Xnor(True) is false + assert Xnor(False) is true + assert Xnor(True, True) is true + assert Xnor(True, False) is false + assert Xnor(False, False) is true + assert Xnor(True, A) == A + assert Xnor(False, A) == ~A + assert Xnor(True, False, False) is false + assert Xnor(True, False, A) == A + assert Xnor(False, False, A) == ~A + + +def test_Implies(): + raises(ValueError, lambda: Implies(A, B, C)) + assert Implies(True, True) is true + assert Implies(True, False) is false + assert Implies(False, True) is true + assert Implies(False, False) is true + assert Implies(0, A) is true + assert Implies(1, 1) is true + assert Implies(1, 0) is false + assert A >> B == B << A + assert (A < 1) >> (A >= 1) == (A >= 1) + assert (A < 1) >> (S.One > A) is true + assert A >> A is true + + +def test_Equivalent(): + assert Equivalent(A, B) == Equivalent(B, A) == Equivalent(A, B, A) + assert Equivalent() is true + assert Equivalent(A, A) == Equivalent(A) is true + assert Equivalent(True, True) == Equivalent(False, False) is true + assert Equivalent(True, False) == Equivalent(False, True) is false + assert Equivalent(A, True) == A + assert Equivalent(A, False) == Not(A) + assert Equivalent(A, B, True) == A & B + assert Equivalent(A, B, False) 
== ~A & ~B + assert Equivalent(1, A) == A + assert Equivalent(0, A) == Not(A) + assert Equivalent(A, Equivalent(B, C)) != Equivalent(Equivalent(A, B), C) + assert Equivalent(A < 1, A >= 1) is false + assert Equivalent(A < 1, A >= 1, 0) is false + assert Equivalent(A < 1, A >= 1, 1) is false + assert Equivalent(A < 1, S.One > A) == Equivalent(1, 1) == Equivalent(0, 0) + assert Equivalent(Equality(A, B), Equality(B, A)) is true + + +def test_Exclusive(): + assert Exclusive(False, False, False) is true + assert Exclusive(True, False, False) is true + assert Exclusive(True, True, False) is false + assert Exclusive(True, True, True) is false + + +def test_equals(): + assert Not(Or(A, B)).equals(And(Not(A), Not(B))) is True + assert Equivalent(A, B).equals((A >> B) & (B >> A)) is True + assert ((A | ~B) & (~A | B)).equals((~A & ~B) | (A & B)) is True + assert (A >> B).equals(~A >> ~B) is False + assert (A >> (B >> A)).equals(A >> (C >> A)) is False + raises(NotImplementedError, lambda: (A & B).equals(A > B)) + + +def test_simplification_boolalg(): + """ + Test working of simplification methods. 
+ """ + set1 = [[0, 0, 1], [0, 1, 1], [1, 0, 0], [1, 1, 0]] + set2 = [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1]] + assert SOPform([x, y, z], set1) == Or(And(Not(x), z), And(Not(z), x)) + assert Not(SOPform([x, y, z], set2)) == \ + Not(Or(And(Not(x), Not(z)), And(x, z))) + assert POSform([x, y, z], set1 + set2) is true + assert SOPform([x, y, z], set1 + set2) is true + assert SOPform([Dummy(), Dummy(), Dummy()], set1 + set2) is true + + minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], + [1, 1, 1, 1]] + dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]] + assert ( + SOPform([w, x, y, z], minterms, dontcares) == + Or(And(y, z), And(Not(w), Not(x)))) + assert POSform([w, x, y, z], minterms, dontcares) == And(Or(Not(w), y), z) + + minterms = [1, 3, 7, 11, 15] + dontcares = [0, 2, 5] + assert ( + SOPform([w, x, y, z], minterms, dontcares) == + Or(And(y, z), And(Not(w), Not(x)))) + assert POSform([w, x, y, z], minterms, dontcares) == And(Or(Not(w), y), z) + + minterms = [1, [0, 0, 1, 1], 7, [1, 0, 1, 1], + [1, 1, 1, 1]] + dontcares = [0, [0, 0, 1, 0], 5] + assert ( + SOPform([w, x, y, z], minterms, dontcares) == + Or(And(y, z), And(Not(w), Not(x)))) + assert POSform([w, x, y, z], minterms, dontcares) == And(Or(Not(w), y), z) + + minterms = [1, {y: 1, z: 1}] + dontcares = [0, [0, 0, 1, 0], 5] + assert ( + SOPform([w, x, y, z], minterms, dontcares) == + Or(And(y, z), And(Not(w), Not(x)))) + assert POSform([w, x, y, z], minterms, dontcares) == And(Or(Not(w), y), z) + + + minterms = [{y: 1, z: 1}, 1] + dontcares = [[0, 0, 0, 0]] + + minterms = [[0, 0, 0]] + raises(ValueError, lambda: SOPform([w, x, y, z], minterms)) + raises(ValueError, lambda: POSform([w, x, y, z], minterms)) + + raises(TypeError, lambda: POSform([w, x, y, z], ["abcdefg"])) + + # test simplification + ans = And(A, Or(B, C)) + assert simplify_logic(A & (B | C)) == ans + assert simplify_logic((A & B) | (A & C)) == ans + assert simplify_logic(Implies(A, B)) == Or(Not(A), B) + assert 
simplify_logic(Equivalent(A, B)) == \ + Or(And(A, B), And(Not(A), Not(B))) + assert simplify_logic(And(Equality(A, 2), C)) == And(Equality(A, 2), C) + assert simplify_logic(And(Equality(A, 2), A)) == And(Equality(A, 2), A) + assert simplify_logic(And(Equality(A, B), C)) == And(Equality(A, B), C) + assert simplify_logic(Or(And(Equality(A, 3), B), And(Equality(A, 3), C))) \ + == And(Equality(A, 3), Or(B, C)) + b = (~x & ~y & ~z) | (~x & ~y & z) + e = And(A, b) + assert simplify_logic(e) == A & ~x & ~y + raises(ValueError, lambda: simplify_logic(A & (B | C), form='blabla')) + assert simplify(Or(x <= y, And(x < y, z))) == (x <= y) + assert simplify(Or(x <= y, And(y > x, z))) == (x <= y) + assert simplify(Or(x >= y, And(y < x, z))) == (x >= y) + + # Check that expressions with nine variables or more are not simplified + # (without the force-flag) + a, b, c, d, e, f, g, h, j = symbols('a b c d e f g h j') + expr = a & b & c & d & e & f & g & h & j | \ + a & b & c & d & e & f & g & h & ~j + # This expression can be simplified to get rid of the j variables + assert simplify_logic(expr) == expr + + # Test dontcare + assert simplify_logic((a & b) | c | d, dontcare=(a & b)) == c | d + + # check input + ans = SOPform([x, y], [[1, 0]]) + assert SOPform([x, y], [[1, 0]]) == ans + assert POSform([x, y], [[1, 0]]) == ans + + raises(ValueError, lambda: SOPform([x], [[1]], [[1]])) + assert SOPform([x], [[1]], [[0]]) is true + assert SOPform([x], [[0]], [[1]]) is true + assert SOPform([x], [], []) is false + + raises(ValueError, lambda: POSform([x], [[1]], [[1]])) + assert POSform([x], [[1]], [[0]]) is true + assert POSform([x], [[0]], [[1]]) is true + assert POSform([x], [], []) is false + + # check working of simplify + assert simplify((A & B) | (A & C)) == And(A, Or(B, C)) + assert simplify(And(x, Not(x))) == False + assert simplify(Or(x, Not(x))) == True + assert simplify(And(Eq(x, 0), Eq(x, y))) == And(Eq(x, 0), Eq(y, 0)) + assert And(Eq(x - 1, 0), Eq(x, y)).simplify() == 
And(Eq(x, 1), Eq(y, 1)) + assert And(Ne(x - 1, 0), Ne(x, y)).simplify() == And(Ne(x, 1), Ne(x, y)) + assert And(Eq(x - 1, 0), Ne(x, y)).simplify() == And(Eq(x, 1), Ne(y, 1)) + assert And(Eq(x - 1, 0), Eq(x, z + y), Eq(y + x, 0)).simplify( + ) == And(Eq(x, 1), Eq(y, -1), Eq(z, 2)) + assert And(Eq(x - 1, 0), Eq(x + 2, 3)).simplify() == Eq(x, 1) + assert And(Ne(x - 1, 0), Ne(x + 2, 3)).simplify() == Ne(x, 1) + assert And(Eq(x - 1, 0), Eq(x + 2, 2)).simplify() == False + assert And(Ne(x - 1, 0), Ne(x + 2, 2)).simplify( + ) == And(Ne(x, 1), Ne(x, 0)) + assert simplify(Xor(x, ~x)) == True + + +def test_bool_map(): + """ + Test working of bool_map function. + """ + + minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], + [1, 1, 1, 1]] + assert bool_map(Not(Not(a)), a) == (a, {a: a}) + assert bool_map(SOPform([w, x, y, z], minterms), + POSform([w, x, y, z], minterms)) == \ + (And(Or(Not(w), y), Or(Not(x), y), z), {x: x, w: w, z: z, y: y}) + assert bool_map(SOPform([x, z, y], [[1, 0, 1]]), + SOPform([a, b, c], [[1, 0, 1]])) != False + function1 = SOPform([x, z, y], [[1, 0, 1], [0, 0, 1]]) + function2 = SOPform([a, b, c], [[1, 0, 1], [1, 0, 0]]) + assert bool_map(function1, function2) == \ + (function1, {y: a, z: b}) + assert bool_map(Xor(x, y), ~Xor(x, y)) == False + assert bool_map(And(x, y), Or(x, y)) is None + assert bool_map(And(x, y), And(x, y, z)) is None + # issue 16179 + assert bool_map(Xor(x, y, z), ~Xor(x, y, z)) == False + assert bool_map(Xor(a, x, y, z), ~Xor(a, x, y, z)) == False + + +def test_bool_symbol(): + """Test that mixing symbols with boolean values + works as expected""" + + assert And(A, True) == A + assert And(A, True, True) == A + assert And(A, False) is false + assert And(A, True, False) is false + assert Or(A, True) is true + assert Or(A, False) == A + + +def test_is_boolean(): + assert isinstance(True, Boolean) is False + assert isinstance(true, Boolean) is True + assert 1 == True + assert 1 != true + assert (1 == true) is False + 
assert 0 == False + assert 0 != false + assert (0 == false) is False + assert true.is_Boolean is True + assert (A & B).is_Boolean + assert (A | B).is_Boolean + assert (~A).is_Boolean + assert (A ^ B).is_Boolean + assert A.is_Boolean != isinstance(A, Boolean) + assert isinstance(A, Boolean) + + +def test_subs(): + assert (A & B).subs(A, True) == B + assert (A & B).subs(A, False) is false + assert (A & B).subs(B, True) == A + assert (A & B).subs(B, False) is false + assert (A & B).subs({A: True, B: True}) is true + assert (A | B).subs(A, True) is true + assert (A | B).subs(A, False) == B + assert (A | B).subs(B, True) is true + assert (A | B).subs(B, False) == A + assert (A | B).subs({A: True, B: True}) is true + + +""" +we test for axioms of boolean algebra +see https://en.wikipedia.org/wiki/Boolean_algebra_(structure) +""" + + +def test_commutative(): + """Test for commutativity of And and Or""" + A, B = map(Boolean, symbols('A,B')) + + assert A & B == B & A + assert A | B == B | A + + +def test_and_associativity(): + """Test for associativity of And""" + + assert (A & B) & C == A & (B & C) + + +def test_or_assicativity(): + assert ((A | B) | C) == (A | (B | C)) + + +def test_double_negation(): + a = Boolean() + assert ~(~a) == a + + +# test methods + +def test_eliminate_implications(): + assert eliminate_implications(Implies(A, B, evaluate=False)) == (~A) | B + assert eliminate_implications( + A >> (C >> Not(B))) == Or(Or(Not(B), Not(C)), Not(A)) + assert eliminate_implications(Equivalent(A, B, C, D)) == \ + (~A | B) & (~B | C) & (~C | D) & (~D | A) + + +def test_conjuncts(): + assert conjuncts(A & B & C) == {A, B, C} + assert conjuncts((A | B) & C) == {A | B, C} + assert conjuncts(A) == {A} + assert conjuncts(True) == {True} + assert conjuncts(False) == {False} + + +def test_disjuncts(): + assert disjuncts(A | B | C) == {A, B, C} + assert disjuncts((A | B) & C) == {(A | B) & C} + assert disjuncts(A) == {A} + assert disjuncts(True) == {True} + assert 
disjuncts(False) == {False} + + +def test_distribute(): + assert distribute_and_over_or(Or(And(A, B), C)) == And(Or(A, C), Or(B, C)) + assert distribute_or_over_and(And(A, Or(B, C))) == Or(And(A, B), And(A, C)) + assert distribute_xor_over_and(And(A, Xor(B, C))) == Xor(And(A, B), And(A, C)) + + +def test_to_anf(): + x, y, z = symbols('x,y,z') + assert to_anf(And(x, y)) == And(x, y) + assert to_anf(Or(x, y)) == Xor(x, y, And(x, y)) + assert to_anf(Or(Implies(x, y), And(x, y), y)) == \ + Xor(x, True, x & y, remove_true=False) + assert to_anf(Or(Nand(x, y), Nor(x, y), Xnor(x, y), Implies(x, y))) == True + assert to_anf(Or(x, Not(y), Nor(x,z), And(x, y), Nand(y, z))) == \ + Xor(True, And(y, z), And(x, y, z), remove_true=False) + assert to_anf(Xor(x, y)) == Xor(x, y) + assert to_anf(Not(x)) == Xor(x, True, remove_true=False) + assert to_anf(Nand(x, y)) == Xor(True, And(x, y), remove_true=False) + assert to_anf(Nor(x, y)) == Xor(x, y, True, And(x, y), remove_true=False) + assert to_anf(Implies(x, y)) == Xor(x, True, And(x, y), remove_true=False) + assert to_anf(Equivalent(x, y)) == Xor(x, y, True, remove_true=False) + assert to_anf(Nand(x | y, x >> y), deep=False) == \ + Xor(True, And(Or(x, y), Implies(x, y)), remove_true=False) + assert to_anf(Nor(x ^ y, x & y), deep=False) == \ + Xor(True, Or(Xor(x, y), And(x, y)), remove_true=False) + # issue 25218 + assert to_anf(x ^ ~(x ^ y ^ ~y)) == False + + +def test_to_nnf(): + assert to_nnf(true) is true + assert to_nnf(false) is false + assert to_nnf(A) == A + assert to_nnf(A | ~A | B) is true + assert to_nnf(A & ~A & B) is false + assert to_nnf(A >> B) == ~A | B + assert to_nnf(Equivalent(A, B, C)) == (~A | B) & (~B | C) & (~C | A) + assert to_nnf(A ^ B ^ C) == \ + (A | B | C) & (~A | ~B | C) & (A | ~B | ~C) & (~A | B | ~C) + assert to_nnf(ITE(A, B, C)) == (~A | B) & (A | C) + assert to_nnf(Not(A | B | C)) == ~A & ~B & ~C + assert to_nnf(Not(A & B & C)) == ~A | ~B | ~C + assert to_nnf(Not(A >> B)) == A & ~B + assert 
to_nnf(Not(Equivalent(A, B, C))) == And(Or(A, B, C), Or(~A, ~B, ~C)) + assert to_nnf(Not(A ^ B ^ C)) == \ + (~A | B | C) & (A | ~B | C) & (A | B | ~C) & (~A | ~B | ~C) + assert to_nnf(Not(ITE(A, B, C))) == (~A | ~B) & (A | ~C) + assert to_nnf((A >> B) ^ (B >> A)) == (A & ~B) | (~A & B) + assert to_nnf((A >> B) ^ (B >> A), False) == \ + (~A | ~B | A | B) & ((A & ~B) | (~A & B)) + assert ITE(A, 1, 0).to_nnf() == A + assert ITE(A, 0, 1).to_nnf() == ~A + # although ITE can hold non-Boolean, it will complain if + # an attempt is made to convert the ITE to Boolean nnf + raises(TypeError, lambda: ITE(A < 1, [1], B).to_nnf()) + + +def test_to_cnf(): + assert to_cnf(~(B | C)) == And(Not(B), Not(C)) + assert to_cnf((A & B) | C) == And(Or(A, C), Or(B, C)) + assert to_cnf(A >> B) == (~A) | B + assert to_cnf(A >> (B & C)) == (~A | B) & (~A | C) + assert to_cnf(A & (B | C) | ~A & (B | C), True) == B | C + assert to_cnf(A & B) == And(A, B) + + assert to_cnf(Equivalent(A, B)) == And(Or(A, Not(B)), Or(B, Not(A))) + assert to_cnf(Equivalent(A, B & C)) == \ + (~A | B) & (~A | C) & (~B | ~C | A) + assert to_cnf(Equivalent(A, B | C), True) == \ + And(Or(Not(B), A), Or(Not(C), A), Or(B, C, Not(A))) + assert to_cnf(A + 1) == A + 1 + + +def test_issue_18904(): + x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 = symbols('x1:16') + eq = (( x1 & x2 & x3 & x4 & x5 & x6 & x7 & x8 & x9 ) | + ( x1 & x2 & x3 & x4 & x5 & x6 & x7 & x10 & x9 ) | + ( x1 & x11 & x3 & x12 & x5 & x13 & x14 & x15 & x9 )) + assert is_cnf(to_cnf(eq)) + raises(ValueError, lambda: to_cnf(eq, simplify=True)) + for f, t in zip((And, Or), (to_cnf, to_dnf)): + eq = f(x1, x2, x3, x4, x5, x6, x7, x8, x9) + raises(ValueError, lambda: to_cnf(eq, simplify=True)) + assert t(eq, simplify=True, force=True) == eq + + +def test_issue_9949(): + assert is_cnf(to_cnf((b > -5) | (a > 2) & (a < 4))) + + +def test_to_CNF(): + assert CNF.CNF_to_cnf(CNF.to_CNF(~(B | C))) == to_cnf(~(B | C)) + assert CNF.CNF_to_cnf(CNF.to_CNF((A & 
B) | C)) == to_cnf((A & B) | C) + assert CNF.CNF_to_cnf(CNF.to_CNF(A >> B)) == to_cnf(A >> B) + assert CNF.CNF_to_cnf(CNF.to_CNF(A >> (B & C))) == to_cnf(A >> (B & C)) + assert CNF.CNF_to_cnf(CNF.to_CNF(A & (B | C) | ~A & (B | C))) == to_cnf(A & (B | C) | ~A & (B | C)) + assert CNF.CNF_to_cnf(CNF.to_CNF(A & B)) == to_cnf(A & B) + + +def test_to_dnf(): + assert to_dnf(~(B | C)) == And(Not(B), Not(C)) + assert to_dnf(A & (B | C)) == Or(And(A, B), And(A, C)) + assert to_dnf(A >> B) == (~A) | B + assert to_dnf(A >> (B & C)) == (~A) | (B & C) + assert to_dnf(A | B) == A | B + + assert to_dnf(Equivalent(A, B), True) == \ + Or(And(A, B), And(Not(A), Not(B))) + assert to_dnf(Equivalent(A, B & C), True) == \ + Or(And(A, B, C), And(Not(A), Not(B)), And(Not(A), Not(C))) + assert to_dnf(A + 1) == A + 1 + + +def test_to_int_repr(): + x, y, z = map(Boolean, symbols('x,y,z')) + + def sorted_recursive(arg): + try: + return sorted(sorted_recursive(x) for x in arg) + except TypeError: # arg is not a sequence + return arg + + assert sorted_recursive(to_int_repr([x | y, z | x], [x, y, z])) == \ + sorted_recursive([[1, 2], [1, 3]]) + assert sorted_recursive(to_int_repr([x | y, z | ~x], [x, y, z])) == \ + sorted_recursive([[1, 2], [3, -1]]) + + +def test_is_anf(): + x, y = symbols('x,y') + assert is_anf(true) is True + assert is_anf(false) is True + assert is_anf(x) is True + assert is_anf(And(x, y)) is True + assert is_anf(Xor(x, y, And(x, y))) is True + assert is_anf(Xor(x, y, Or(x, y))) is False + assert is_anf(Xor(Not(x), y)) is False + + +def test_is_nnf(): + assert is_nnf(true) is True + assert is_nnf(A) is True + assert is_nnf(~A) is True + assert is_nnf(A & B) is True + assert is_nnf((A & B) | (~A & A) | (~B & B) | (~A & ~B), False) is True + assert is_nnf((A | B) & (~A | ~B)) is True + assert is_nnf(Not(Or(A, B))) is False + assert is_nnf(A ^ B) is False + assert is_nnf((A & B) | (~A & A) | (~B & B) | (~A & ~B), True) is False + + +def test_is_cnf(): + assert is_cnf(x) is True 
+ assert is_cnf(x | y | z) is True + assert is_cnf(x & y & z) is True + assert is_cnf((x | y) & z) is True + assert is_cnf((x & y) | z) is False + assert is_cnf(~(x & y) | z) is False + + +def test_is_dnf(): + assert is_dnf(x) is True + assert is_dnf(x | y | z) is True + assert is_dnf(x & y & z) is True + assert is_dnf((x & y) | z) is True + assert is_dnf((x | y) & z) is False + assert is_dnf(~(x | y) & z) is False + + +def test_ITE(): + A, B, C = symbols('A:C') + assert ITE(True, False, True) is false + assert ITE(True, True, False) is true + assert ITE(False, True, False) is false + assert ITE(False, False, True) is true + assert isinstance(ITE(A, B, C), ITE) + + A = True + assert ITE(A, B, C) == B + A = False + assert ITE(A, B, C) == C + B = True + assert ITE(And(A, B), B, C) == C + assert ITE(Or(A, False), And(B, True), False) is false + assert ITE(x, A, B) == Not(x) + assert ITE(x, B, A) == x + assert ITE(1, x, y) == x + assert ITE(0, x, y) == y + raises(TypeError, lambda: ITE(2, x, y)) + raises(TypeError, lambda: ITE(1, [], y)) + raises(TypeError, lambda: ITE(1, (), y)) + raises(TypeError, lambda: ITE(1, y, [])) + assert ITE(1, 1, 1) is S.true + assert isinstance(ITE(1, 1, 1, evaluate=False), ITE) + + assert ITE(Eq(x, True), y, x) == ITE(x, y, x) + assert ITE(Eq(x, False), y, x) == ITE(~x, y, x) + assert ITE(Ne(x, True), y, x) == ITE(~x, y, x) + assert ITE(Ne(x, False), y, x) == ITE(x, y, x) + assert ITE(Eq(S. 
true, x), y, x) == ITE(x, y, x) + assert ITE(Eq(S.false, x), y, x) == ITE(~x, y, x) + assert ITE(Ne(S.true, x), y, x) == ITE(~x, y, x) + assert ITE(Ne(S.false, x), y, x) == ITE(x, y, x) + # 0 and 1 in the context are not treated as True/False + # so the equality must always be False since dissimilar + # objects cannot be equal + assert ITE(Eq(x, 0), y, x) == x + assert ITE(Eq(x, 1), y, x) == x + assert ITE(Ne(x, 0), y, x) == y + assert ITE(Ne(x, 1), y, x) == y + assert ITE(Eq(x, 0), y, z).subs(x, 0) == y + assert ITE(Eq(x, 0), y, z).subs(x, 1) == z + raises(ValueError, lambda: ITE(x > 1, y, x, z)) + + +def test_is_literal(): + assert is_literal(True) is True + assert is_literal(False) is True + assert is_literal(A) is True + assert is_literal(~A) is True + assert is_literal(Or(A, B)) is False + assert is_literal(Q.zero(A)) is True + assert is_literal(Not(Q.zero(A))) is True + assert is_literal(Or(A, B)) is False + assert is_literal(And(Q.zero(A), Q.zero(B))) is False + assert is_literal(x < 3) + assert not is_literal(x + y < 3) + + +def test_operators(): + # Mostly test __and__, __rand__, and so on + assert True & A == A & True == A + assert False & A == A & False == False + assert A & B == And(A, B) + assert True | A == A | True == True + assert False | A == A | False == A + assert A | B == Or(A, B) + assert ~A == Not(A) + assert True >> A == A << True == A + assert False >> A == A << False == True + assert A >> True == True << A == True + assert A >> False == False << A == ~A + assert A >> B == B << A == Implies(A, B) + assert True ^ A == A ^ True == ~A + assert False ^ A == A ^ False == A + assert A ^ B == Xor(A, B) + + +def test_true_false(): + assert true is S.true + assert false is S.false + assert true is not True + assert false is not False + assert true + assert not false + assert true == True + assert false == False + assert not (true == False) + assert not (false == True) + assert not (true == false) + + assert hash(true) == hash(True) + assert 
hash(false) == hash(False) + assert len({true, True}) == len({false, False}) == 1 + + assert isinstance(true, BooleanAtom) + assert isinstance(false, BooleanAtom) + # We don't want to subclass from bool, because bool subclasses from + # int. But operators like &, |, ^, <<, >>, and ~ act differently on 0 and + # 1 then we want them to on true and false. See the docstrings of the + # various And, Or, etc. functions for examples. + assert not isinstance(true, bool) + assert not isinstance(false, bool) + + # Note: using 'is' comparison is important here. We want these to return + # true and false, not True and False + + assert Not(true) is false + assert Not(True) is false + assert Not(false) is true + assert Not(False) is true + assert ~true is false + assert ~false is true + + for T, F in product((True, true), (False, false)): + assert And(T, F) is false + assert And(F, T) is false + assert And(F, F) is false + assert And(T, T) is true + assert And(T, x) == x + assert And(F, x) is false + if not (T is True and F is False): + assert T & F is false + assert F & T is false + if F is not False: + assert F & F is false + if T is not True: + assert T & T is true + + assert Or(T, F) is true + assert Or(F, T) is true + assert Or(F, F) is false + assert Or(T, T) is true + assert Or(T, x) is true + assert Or(F, x) == x + if not (T is True and F is False): + assert T | F is true + assert F | T is true + if F is not False: + assert F | F is false + if T is not True: + assert T | T is true + + assert Xor(T, F) is true + assert Xor(F, T) is true + assert Xor(F, F) is false + assert Xor(T, T) is false + assert Xor(T, x) == ~x + assert Xor(F, x) == x + if not (T is True and F is False): + assert T ^ F is true + assert F ^ T is true + if F is not False: + assert F ^ F is false + if T is not True: + assert T ^ T is false + + assert Nand(T, F) is true + assert Nand(F, T) is true + assert Nand(F, F) is true + assert Nand(T, T) is false + assert Nand(T, x) == ~x + assert Nand(F, x) is 
true + + assert Nor(T, F) is false + assert Nor(F, T) is false + assert Nor(F, F) is true + assert Nor(T, T) is false + assert Nor(T, x) is false + assert Nor(F, x) == ~x + + assert Implies(T, F) is false + assert Implies(F, T) is true + assert Implies(F, F) is true + assert Implies(T, T) is true + assert Implies(T, x) == x + assert Implies(F, x) is true + assert Implies(x, T) is true + assert Implies(x, F) == ~x + if not (T is True and F is False): + assert T >> F is false + assert F << T is false + assert F >> T is true + assert T << F is true + if F is not False: + assert F >> F is true + assert F << F is true + if T is not True: + assert T >> T is true + assert T << T is true + + assert Equivalent(T, F) is false + assert Equivalent(F, T) is false + assert Equivalent(F, F) is true + assert Equivalent(T, T) is true + assert Equivalent(T, x) == x + assert Equivalent(F, x) == ~x + assert Equivalent(x, T) == x + assert Equivalent(x, F) == ~x + + assert ITE(T, T, T) is true + assert ITE(T, T, F) is true + assert ITE(T, F, T) is false + assert ITE(T, F, F) is false + assert ITE(F, T, T) is true + assert ITE(F, T, F) is false + assert ITE(F, F, T) is true + assert ITE(F, F, F) is false + + assert all(i.simplify(1, 2) is i for i in (S.true, S.false)) + + +def test_bool_as_set(): + assert ITE(y <= 0, False, y >= 1).as_set() == Interval(1, oo) + assert And(x <= 2, x >= -2).as_set() == Interval(-2, 2) + assert Or(x >= 2, x <= -2).as_set() == Interval(-oo, -2) + Interval(2, oo) + assert Not(x > 2).as_set() == Interval(-oo, 2) + # issue 10240 + assert Not(And(x > 2, x < 3)).as_set() == \ + Union(Interval(-oo, 2), Interval(3, oo)) + assert true.as_set() == S.UniversalSet + assert false.as_set() is S.EmptySet + assert x.as_set() == S.UniversalSet + assert And(Or(x < 1, x > 3), x < 2).as_set() == Interval.open(-oo, 1) + assert And(x < 1, sin(x) < 3).as_set() == (x < 1).as_set() + raises(NotImplementedError, lambda: (sin(x) < 1).as_set()) + # watch for object morph in as_set + 
assert Eq(-1, cos(2*x)**2/sin(2*x)**2).as_set() is S.EmptySet + + +@XFAIL +def test_multivariate_bool_as_set(): + x, y = symbols('x,y') + + assert And(x >= 0, y >= 0).as_set() == Interval(0, oo)*Interval(0, oo) + assert Or(x >= 0, y >= 0).as_set() == S.Reals*S.Reals - \ + Interval(-oo, 0, True, True)*Interval(-oo, 0, True, True) + + +def test_all_or_nothing(): + x = symbols('x', extended_real=True) + args = x >= -oo, x <= oo + v = And(*args) + if v.func is And: + assert len(v.args) == len(args) - args.count(S.true) + else: + assert v == True + v = Or(*args) + if v.func is Or: + assert len(v.args) == 2 + else: + assert v == True + + +def test_canonical_atoms(): + assert true.canonical == true + assert false.canonical == false + + +def test_negated_atoms(): + assert true.negated == false + assert false.negated == true + + +def test_issue_8777(): + assert And(x > 2, x < oo).as_set() == Interval(2, oo, left_open=True) + assert And(x >= 1, x < oo).as_set() == Interval(1, oo) + assert (x < oo).as_set() == Interval(-oo, oo) + assert (x > -oo).as_set() == Interval(-oo, oo) + + +def test_issue_8975(): + assert Or(And(-oo < x, x <= -2), And(2 <= x, x < oo)).as_set() == \ + Interval(-oo, -2) + Interval(2, oo) + + +def test_term_to_integer(): + assert term_to_integer([1, 0, 1, 0, 0, 1, 0]) == 82 + assert term_to_integer('0010101000111001') == 10809 + + +def test_issue_21971(): + a, b, c, d = symbols('a b c d') + f = a & b & c | a & c + assert f.subs(a & c, d) == b & d | d + assert f.subs(a & b & c, d) == a & c | d + + f = (a | b | c) & (a | c) + assert f.subs(a | c, d) == (b | d) & d + assert f.subs(a | b | c, d) == (a | c) & d + + f = (a ^ b ^ c) & (a ^ c) + assert f.subs(a ^ c, d) == (b ^ d) & d + assert f.subs(a ^ b ^ c, d) == (a ^ c) & d + + +def test_truth_table(): + assert list(truth_table(And(x, y), [x, y], input=False)) == \ + [False, False, False, True] + assert list(truth_table(x | y, [x, y], input=False)) == \ + [False, True, True, True] + assert list(truth_table(x 
>> y, [x, y], input=False)) == \ + [True, True, False, True] + assert list(truth_table(And(x, y), [x, y])) == \ + [([0, 0], False), ([0, 1], False), ([1, 0], False), ([1, 1], True)] + + +def test_issue_8571(): + for t in (S.true, S.false): + raises(TypeError, lambda: +t) + raises(TypeError, lambda: -t) + raises(TypeError, lambda: abs(t)) + # use int(bool(t)) to get 0 or 1 + raises(TypeError, lambda: int(t)) + + for o in [S.Zero, S.One, x]: + for _ in range(2): + raises(TypeError, lambda: o + t) + raises(TypeError, lambda: o - t) + raises(TypeError, lambda: o % t) + raises(TypeError, lambda: o*t) + raises(TypeError, lambda: o/t) + raises(TypeError, lambda: o**t) + o, t = t, o # do again in reversed order + + +def test_expand_relational(): + n = symbols('n', negative=True) + p, q = symbols('p q', positive=True) + r = ((n + q*(-n/q + 1))/(q*(-n/q + 1)) < 0) + assert r is not S.false + assert r.expand() is S.false + assert (q > 0).expand() is S.true + + +def test_issue_12717(): + assert S.true.is_Atom == True + assert S.false.is_Atom == True + + +def test_as_Boolean(): + nz = symbols('nz', nonzero=True) + assert all(as_Boolean(i) is S.true for i in (True, S.true, 1, nz)) + z = symbols('z', zero=True) + assert all(as_Boolean(i) is S.false for i in (False, S.false, 0, z)) + assert all(as_Boolean(i) == i for i in (x, x < 0)) + for i in (2, S(2), x + 1, []): + raises(TypeError, lambda: as_Boolean(i)) + + +def test_binary_symbols(): + assert ITE(x < 1, y, z).binary_symbols == {y, z} + for f in (Eq, Ne): + assert f(x, 1).binary_symbols == set() + assert f(x, True).binary_symbols == {x} + assert f(x, False).binary_symbols == {x} + assert S.true.binary_symbols == set() + assert S.false.binary_symbols == set() + assert x.binary_symbols == {x} + assert And(x, Eq(y, False), Eq(z, 1)).binary_symbols == {x, y} + assert Q.prime(x).binary_symbols == set() + assert Q.lt(x, 1).binary_symbols == set() + assert Q.is_true(x).binary_symbols == {x} + assert Q.eq(x, True).binary_symbols == 
{x} + assert Q.prime(x).binary_symbols == set() + + +def test_BooleanFunction_diff(): + assert And(x, y).diff(x) == Piecewise((0, Eq(y, False)), (1, True)) + + +def test_issue_14700(): + A, B, C, D, E, F, G, H = symbols('A B C D E F G H') + q = ((B & D & H & ~F) | (B & H & ~C & ~D) | (B & H & ~C & ~F) | + (B & H & ~D & ~G) | (B & H & ~F & ~G) | (C & G & ~B & ~D) | + (C & G & ~D & ~H) | (C & G & ~F & ~H) | (D & F & H & ~B) | + (D & F & ~G & ~H) | (B & D & F & ~C & ~H) | (D & E & F & ~B & ~C) | + (D & F & ~A & ~B & ~C) | (D & F & ~A & ~C & ~H) | + (A & B & D & F & ~E & ~H)) + soldnf = ((B & D & H & ~F) | (D & F & H & ~B) | (B & H & ~C & ~D) | + (B & H & ~D & ~G) | (C & G & ~B & ~D) | (C & G & ~D & ~H) | + (C & G & ~F & ~H) | (D & F & ~G & ~H) | (D & E & F & ~C & ~H) | + (D & F & ~A & ~C & ~H) | (A & B & D & F & ~E & ~H)) + solcnf = ((B | C | D) & (B | D | G) & (C | D | H) & (C | F | H) & + (D | G | H) & (F | G | H) & (B | F | ~D | ~H) & + (~B | ~D | ~F | ~H) & (D | ~B | ~C | ~G | ~H) & + (A | H | ~C | ~D | ~F | ~G) & (H | ~C | ~D | ~E | ~F | ~G) & + (B | E | H | ~A | ~D | ~F | ~G)) + assert simplify_logic(q, "dnf") == soldnf + assert simplify_logic(q, "cnf") == solcnf + + minterms = [[0, 1, 0, 0], [0, 1, 0, 1], [0, 1, 1, 0], [0, 1, 1, 1], + [0, 0, 1, 1], [1, 0, 1, 1]] + dontcares = [[1, 0, 0, 0], [1, 0, 0, 1], [1, 1, 0, 0], [1, 1, 0, 1]] + assert SOPform([w, x, y, z], minterms) == (x & ~w) | (y & z & ~x) + # Should not be more complicated with don't cares + assert SOPform([w, x, y, z], minterms, dontcares) == \ + (x & ~w) | (y & z & ~x) + + +def test_issue_25115(): + cond = Contains(x, S.Integers) + # Previously this raised an exception: + assert simplify_logic(cond) == cond + + +def test_relational_simplification(): + w, x, y, z = symbols('w x y z', real=True) + d, e = symbols('d e', real=False) + # Test all combinations or sign and order + assert Or(x >= y, x < y).simplify() == S.true + assert Or(x >= y, y > x).simplify() == S.true + assert Or(x >= y, -x > 
-y).simplify() == S.true + assert Or(x >= y, -y < -x).simplify() == S.true + assert Or(-x <= -y, x < y).simplify() == S.true + assert Or(-x <= -y, -x > -y).simplify() == S.true + assert Or(-x <= -y, y > x).simplify() == S.true + assert Or(-x <= -y, -y < -x).simplify() == S.true + assert Or(y <= x, x < y).simplify() == S.true + assert Or(y <= x, y > x).simplify() == S.true + assert Or(y <= x, -x > -y).simplify() == S.true + assert Or(y <= x, -y < -x).simplify() == S.true + assert Or(-y >= -x, x < y).simplify() == S.true + assert Or(-y >= -x, y > x).simplify() == S.true + assert Or(-y >= -x, -x > -y).simplify() == S.true + assert Or(-y >= -x, -y < -x).simplify() == S.true + + assert Or(x < y, x >= y).simplify() == S.true + assert Or(y > x, x >= y).simplify() == S.true + assert Or(-x > -y, x >= y).simplify() == S.true + assert Or(-y < -x, x >= y).simplify() == S.true + assert Or(x < y, -x <= -y).simplify() == S.true + assert Or(-x > -y, -x <= -y).simplify() == S.true + assert Or(y > x, -x <= -y).simplify() == S.true + assert Or(-y < -x, -x <= -y).simplify() == S.true + assert Or(x < y, y <= x).simplify() == S.true + assert Or(y > x, y <= x).simplify() == S.true + assert Or(-x > -y, y <= x).simplify() == S.true + assert Or(-y < -x, y <= x).simplify() == S.true + assert Or(x < y, -y >= -x).simplify() == S.true + assert Or(y > x, -y >= -x).simplify() == S.true + assert Or(-x > -y, -y >= -x).simplify() == S.true + assert Or(-y < -x, -y >= -x).simplify() == S.true + + # Some other tests + assert Or(x >= y, w < z, x <= y).simplify() == S.true + assert And(x >= y, x < y).simplify() == S.false + assert Or(x >= y, Eq(y, x)).simplify() == (x >= y) + assert And(x >= y, Eq(y, x)).simplify() == Eq(x, y) + assert And(Eq(x, y), x >= 1, 2 < y, y >= 5, z < y).simplify() == \ + (Eq(x, y) & (x >= 1) & (y >= 5) & (y > z)) + assert Or(Eq(x, y), x >= y, w < y, z < y).simplify() == \ + (x >= y) | (y > z) | (w < y) + assert And(Eq(x, y), x >= y, w < y, y >= z, z < y).simplify() == \ + Eq(x, 
y) & (y > z) & (w < y) + # assert And(Eq(x, y), x >= y, w < y, y >= z, z < y).simplify(relational_minmax=True) == \ + # And(Eq(x, y), y > Max(w, z)) + # assert Or(Eq(x, y), x >= 1, 2 < y, y >= 5, z < y).simplify(relational_minmax=True) == \ + # (Eq(x, y) | (x >= 1) | (y > Min(2, z))) + assert And(Eq(x, y), x >= 1, 2 < y, y >= 5, z < y).simplify() == \ + (Eq(x, y) & (x >= 1) & (y >= 5) & (y > z)) + assert (Eq(x, y) & Eq(d, e) & (x >= y) & (d >= e)).simplify() == \ + (Eq(x, y) & Eq(d, e) & (d >= e)) + assert And(Eq(x, y), Eq(x, -y)).simplify() == And(Eq(x, 0), Eq(y, 0)) + assert Xor(x >= y, x <= y).simplify() == Ne(x, y) + assert And(x > 1, x < -1, Eq(x, y)).simplify() == S.false + # From #16690 + assert And(x >= y, Eq(y, 0)).simplify() == And(x >= 0, Eq(y, 0)) + assert Or(Ne(x, 1), Ne(x, 2)).simplify() == S.true + assert And(Eq(x, 1), Ne(2, x)).simplify() == Eq(x, 1) + assert Or(Eq(x, 1), Ne(2, x)).simplify() == Ne(x, 2) + +def test_issue_8373(): + x = symbols('x', real=True) + assert Or(x < 1, x > -1).simplify() == S.true + assert Or(x < 1, x >= 1).simplify() == S.true + assert And(x < 1, x >= 1).simplify() == S.false + assert Or(x <= 1, x >= 1).simplify() == S.true + + +def test_issue_7950(): + x = symbols('x', real=True) + assert And(Eq(x, 1), Eq(x, 2)).simplify() == S.false + + +@slow +def test_relational_simplification_numerically(): + def test_simplification_numerically_function(original, simplified): + symb = original.free_symbols + n = len(symb) + valuelist = list(set(combinations(list(range(-(n-1), n))*n, n))) + for values in valuelist: + sublist = dict(zip(symb, values)) + originalvalue = original.subs(sublist) + simplifiedvalue = simplified.subs(sublist) + assert originalvalue == simplifiedvalue, "Original: {}\nand"\ + " simplified: {}\ndo not evaluate to the same value for {}"\ + "".format(original, simplified, sublist) + + w, x, y, z = symbols('w x y z', real=True) + d, e = symbols('d e', real=False) + + expressions = (And(Eq(x, y), x >= y, w < y, y >= 
z, z < y), + And(Eq(x, y), x >= 1, 2 < y, y >= 5, z < y), + Or(Eq(x, y), x >= 1, 2 < y, y >= 5, z < y), + And(x >= y, Eq(y, x)), + Or(And(Eq(x, y), x >= y, w < y, Or(y >= z, z < y)), + And(Eq(x, y), x >= 1, 2 < y, y >= -1, z < y)), + (Eq(x, y) & Eq(d, e) & (x >= y) & (d >= e)), + ) + + for expression in expressions: + test_simplification_numerically_function(expression, + expression.simplify()) + + +def test_relational_simplification_patterns_numerically(): + from sympy.core import Wild + from sympy.logic.boolalg import _simplify_patterns_and, \ + _simplify_patterns_or, _simplify_patterns_xor + a = Wild('a') + b = Wild('b') + c = Wild('c') + symb = [a, b, c] + patternlists = [[And, _simplify_patterns_and()], + [Or, _simplify_patterns_or()], + [Xor, _simplify_patterns_xor()]] + valuelist = list(set(combinations(list(range(-2, 3))*3, 3))) + # Skip combinations of +/-2 and 0, except for all 0 + valuelist = [v for v in valuelist if any(w % 2 for w in v) or not any(v)] + for func, patternlist in patternlists: + for pattern in patternlist: + original = func(*pattern[0].args) + simplified = pattern[1] + for values in valuelist: + sublist = dict(zip(symb, values)) + originalvalue = original.xreplace(sublist) + simplifiedvalue = simplified.xreplace(sublist) + assert originalvalue == simplifiedvalue, "Original: {}\nand"\ + " simplified: {}\ndo not evaluate to the same value for"\ + "{}".format(pattern[0], simplified, sublist) + + +def test_issue_16803(): + n = symbols('n') + # No simplification done, but should not raise an exception + assert ((n > 3) | (n < 0) | ((n > 0) & (n < 3))).simplify() == \ + (n > 3) | (n < 0) | ((n > 0) & (n < 3)) + + +def test_issue_17530(): + r = {x: oo, y: oo} + assert Or(x + y > 0, x - y < 0).subs(r) + assert not And(x + y < 0, x - y < 0).subs(r) + raises(TypeError, lambda: Or(x + y < 0, x - y < 0).subs(r)) + raises(TypeError, lambda: And(x + y > 0, x - y < 0).subs(r)) + raises(TypeError, lambda: And(x + y > 0, x - y < 0).subs(r)) + + +def 
test_anf_coeffs(): + assert anf_coeffs([1, 0]) == [1, 1] + assert anf_coeffs([0, 0, 0, 1]) == [0, 0, 0, 1] + assert anf_coeffs([0, 1, 1, 1]) == [0, 1, 1, 1] + assert anf_coeffs([1, 1, 1, 0]) == [1, 0, 0, 1] + assert anf_coeffs([1, 0, 0, 0]) == [1, 1, 1, 1] + assert anf_coeffs([1, 0, 0, 1]) == [1, 1, 1, 0] + assert anf_coeffs([1, 1, 0, 1]) == [1, 0, 1, 1] + + +def test_ANFform(): + x, y = symbols('x,y') + assert ANFform([x], [1, 1]) == True + assert ANFform([x], [0, 0]) == False + assert ANFform([x], [1, 0]) == Xor(x, True, remove_true=False) + assert ANFform([x, y], [1, 1, 1, 0]) == \ + Xor(True, And(x, y), remove_true=False) + + +def test_bool_minterm(): + x, y = symbols('x,y') + assert bool_minterm(3, [x, y]) == And(x, y) + assert bool_minterm([1, 0], [x, y]) == And(Not(y), x) + + +def test_bool_maxterm(): + x, y = symbols('x,y') + assert bool_maxterm(2, [x, y]) == Or(Not(x), y) + assert bool_maxterm([0, 1], [x, y]) == Or(Not(y), x) + + +def test_bool_monomial(): + x, y = symbols('x,y') + assert bool_monomial(1, [x, y]) == y + assert bool_monomial([1, 1], [x, y]) == And(x, y) + + +def test_check_pair(): + assert _check_pair([0, 1, 0], [0, 1, 1]) == 2 + assert _check_pair([0, 1, 0], [1, 1, 1]) == -1 + + +def test_issue_19114(): + expr = (B & C) | (A & ~C) | (~A & ~B) + # Expression is minimal, but there are multiple minimal forms possible + res1 = (A & B) | (C & ~A) | (~B & ~C) + result = to_dnf(expr, simplify=True) + assert result in (expr, res1) + + +def test_issue_20870(): + result = SOPform([a, b, c, d], [1, 2, 3, 4, 5, 6, 8, 9, 11, 12, 14, 15]) + expected = ((d & ~b) | (a & b & c) | (a & ~c & ~d) | + (b & ~a & ~c) | (c & ~a & ~d)) + assert result == expected + + +def test_convert_to_varsSOP(): + assert _convert_to_varsSOP([0, 1, 0], [x, y, z]) == And(Not(x), y, Not(z)) + assert _convert_to_varsSOP([3, 1, 0], [x, y, z]) == And(y, Not(z)) + + +def test_convert_to_varsPOS(): + assert _convert_to_varsPOS([0, 1, 0], [x, y, z]) == Or(x, Not(y), z) + assert 
_convert_to_varsPOS([3, 1, 0], [x, y, z]) == Or(Not(y), z) + + +def test_gateinputcount(): + a, b, c, d, e = symbols('a:e') + assert gateinputcount(And(a, b)) == 2 + assert gateinputcount(a | b & c & d ^ (e | a)) == 9 + assert gateinputcount(And(a, True)) == 0 + raises(TypeError, lambda: gateinputcount(a*b)) + + +def test_refine(): + # relational + assert not refine(x < 0, ~(x < 0)) + assert refine(x < 0, (x < 0)) + assert refine(x < 0, (0 > x)) is S.true + assert refine(x < 0, (y < 0)) == (x < 0) + assert not refine(x <= 0, ~(x <= 0)) + assert refine(x <= 0, (x <= 0)) + assert refine(x <= 0, (0 >= x)) is S.true + assert refine(x <= 0, (y <= 0)) == (x <= 0) + assert not refine(x > 0, ~(x > 0)) + assert refine(x > 0, (x > 0)) + assert refine(x > 0, (0 < x)) is S.true + assert refine(x > 0, (y > 0)) == (x > 0) + assert not refine(x >= 0, ~(x >= 0)) + assert refine(x >= 0, (x >= 0)) + assert refine(x >= 0, (0 <= x)) is S.true + assert refine(x >= 0, (y >= 0)) == (x >= 0) + assert not refine(Eq(x, 0), ~(Eq(x, 0))) + assert refine(Eq(x, 0), (Eq(x, 0))) + assert refine(Eq(x, 0), (Eq(0, x))) is S.true + assert refine(Eq(x, 0), (Eq(y, 0))) == Eq(x, 0) + assert not refine(Ne(x, 0), ~(Ne(x, 0))) + assert refine(Ne(x, 0), (Ne(0, x))) is S.true + assert refine(Ne(x, 0), (Ne(x, 0))) + assert refine(Ne(x, 0), (Ne(y, 0))) == (Ne(x, 0)) + + # boolean functions + assert refine(And(x > 0, y > 0), (x > 0)) == (y > 0) + assert refine(And(x > 0, y > 0), (x > 0) & (y > 0)) is S.true + + # predicates + assert refine(Q.positive(x), Q.positive(x)) is S.true + assert refine(Q.positive(x), Q.negative(x)) is S.false + assert refine(Q.positive(x), Q.real(x)) == Q.positive(x) + + +def test_relational_threeterm_simplification_patterns_numerically(): + from sympy.core import Wild + from sympy.logic.boolalg import _simplify_patterns_and3 + a = Wild('a') + b = Wild('b') + c = Wild('c') + symb = [a, b, c] + patternlists = [[And, _simplify_patterns_and3()]] + valuelist = 
list(set(combinations(list(range(-2, 3))*3, 3))) + # Skip combinations of +/-2 and 0, except for all 0 + valuelist = [v for v in valuelist if any(w % 2 for w in v) or not any(v)] + for func, patternlist in patternlists: + for pattern in patternlist: + original = func(*pattern[0].args) + simplified = pattern[1] + for values in valuelist: + sublist = dict(zip(symb, values)) + originalvalue = original.xreplace(sublist) + simplifiedvalue = simplified.xreplace(sublist) + assert originalvalue == simplifiedvalue, "Original: {}\nand"\ + " simplified: {}\ndo not evaluate to the same value for"\ + "{}".format(pattern[0], simplified, sublist) + + +def test_issue_25451(): + x = Or(And(a, c), Eq(a, b)) + assert isinstance(x, Or) + assert set(x.args) == {And(a, c), Eq(a, b)} diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_dimacs.py b/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_dimacs.py new file mode 100644 index 0000000000000000000000000000000000000000..3a9a51a39d33fb807688614cb5809b621ce21a2c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_dimacs.py @@ -0,0 +1,234 @@ +"""Various tests on satisfiability using dimacs cnf file syntax +You can find lots of cnf files in +ftp://dimacs.rutgers.edu/pub/challenge/satisfiability/benchmarks/cnf/ +""" + +from sympy.logic.utilities.dimacs import load +from sympy.logic.algorithms.dpll import dpll_satisfiable + + +def test_f1(): + assert bool(dpll_satisfiable(load(f1))) + + +def test_f2(): + assert bool(dpll_satisfiable(load(f2))) + + +def test_f3(): + assert bool(dpll_satisfiable(load(f3))) + + +def test_f4(): + assert not bool(dpll_satisfiable(load(f4))) + + +def test_f5(): + assert bool(dpll_satisfiable(load(f5))) + +f1 = """c simple example +c Resolution: SATISFIABLE +c +p cnf 3 2 +1 -3 0 +2 3 -1 0 +""" + + +f2 = """c an example from Quinn's text, 16 variables and 18 clauses. 
+c Resolution: SATISFIABLE +c +p cnf 16 18 + 1 2 0 + -2 -4 0 + 3 4 0 + -4 -5 0 + 5 -6 0 + 6 -7 0 + 6 7 0 + 7 -16 0 + 8 -9 0 + -8 -14 0 + 9 10 0 + 9 -10 0 +-10 -11 0 + 10 12 0 + 11 12 0 + 13 14 0 + 14 -15 0 + 15 16 0 +""" + +f3 = """c +p cnf 6 9 +-1 0 +-3 0 +2 -1 0 +2 -4 0 +5 -4 0 +-1 -3 0 +-4 -6 0 +1 3 -2 0 +4 6 -2 -5 0 +""" + +f4 = """c +c file: hole6.cnf [http://people.sc.fsu.edu/~jburkardt/data/cnf/hole6.cnf] +c +c SOURCE: John Hooker (jh38+@andrew.cmu.edu) +c +c DESCRIPTION: Pigeon hole problem of placing n (for file 'holen.cnf') pigeons +c in n+1 holes without placing 2 pigeons in the same hole +c +c NOTE: Part of the collection at the Forschungsinstitut fuer +c anwendungsorientierte Wissensverarbeitung in Ulm Germany. +c +c NOTE: Not satisfiable +c +p cnf 42 133 +-1 -7 0 +-1 -13 0 +-1 -19 0 +-1 -25 0 +-1 -31 0 +-1 -37 0 +-7 -13 0 +-7 -19 0 +-7 -25 0 +-7 -31 0 +-7 -37 0 +-13 -19 0 +-13 -25 0 +-13 -31 0 +-13 -37 0 +-19 -25 0 +-19 -31 0 +-19 -37 0 +-25 -31 0 +-25 -37 0 +-31 -37 0 +-2 -8 0 +-2 -14 0 +-2 -20 0 +-2 -26 0 +-2 -32 0 +-2 -38 0 +-8 -14 0 +-8 -20 0 +-8 -26 0 +-8 -32 0 +-8 -38 0 +-14 -20 0 +-14 -26 0 +-14 -32 0 +-14 -38 0 +-20 -26 0 +-20 -32 0 +-20 -38 0 +-26 -32 0 +-26 -38 0 +-32 -38 0 +-3 -9 0 +-3 -15 0 +-3 -21 0 +-3 -27 0 +-3 -33 0 +-3 -39 0 +-9 -15 0 +-9 -21 0 +-9 -27 0 +-9 -33 0 +-9 -39 0 +-15 -21 0 +-15 -27 0 +-15 -33 0 +-15 -39 0 +-21 -27 0 +-21 -33 0 +-21 -39 0 +-27 -33 0 +-27 -39 0 +-33 -39 0 +-4 -10 0 +-4 -16 0 +-4 -22 0 +-4 -28 0 +-4 -34 0 +-4 -40 0 +-10 -16 0 +-10 -22 0 +-10 -28 0 +-10 -34 0 +-10 -40 0 +-16 -22 0 +-16 -28 0 +-16 -34 0 +-16 -40 0 +-22 -28 0 +-22 -34 0 +-22 -40 0 +-28 -34 0 +-28 -40 0 +-34 -40 0 +-5 -11 0 +-5 -17 0 +-5 -23 0 +-5 -29 0 +-5 -35 0 +-5 -41 0 +-11 -17 0 +-11 -23 0 +-11 -29 0 +-11 -35 0 +-11 -41 0 +-17 -23 0 +-17 -29 0 +-17 -35 0 +-17 -41 0 +-23 -29 0 +-23 -35 0 +-23 -41 0 +-29 -35 0 +-29 -41 0 +-35 -41 0 +-6 -12 0 +-6 -18 0 +-6 -24 0 +-6 -30 0 +-6 -36 0 +-6 -42 0 +-12 -18 0 +-12 -24 0 +-12 -30 0 +-12 -36 0 +-12 -42 
0 +-18 -24 0 +-18 -30 0 +-18 -36 0 +-18 -42 0 +-24 -30 0 +-24 -36 0 +-24 -42 0 +-30 -36 0 +-30 -42 0 +-36 -42 0 + 6 5 4 3 2 1 0 + 12 11 10 9 8 7 0 + 18 17 16 15 14 13 0 + 24 23 22 21 20 19 0 + 30 29 28 27 26 25 0 + 36 35 34 33 32 31 0 + 42 41 40 39 38 37 0 +""" + +f5 = """c simple example requiring variable selection +c +c NOTE: Satisfiable +c +p cnf 5 5 +1 2 3 0 +1 -2 3 0 +4 5 -3 0 +1 -4 -3 0 +-1 -5 0 +""" diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_inference.py b/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..2b6f49224d6485c4e4f753de33aaf0cfe12154d6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_inference.py @@ -0,0 +1,381 @@ +"""For more tests on satisfiability, see test_dimacs""" + +from sympy.assumptions.ask import Q +from sympy.core.symbol import symbols +from sympy.core.relational import Unequality +from sympy.logic.boolalg import And, Or, Implies, Equivalent, true, false +from sympy.logic.inference import literal_symbol, \ + pl_true, satisfiable, valid, entails, PropKB +from sympy.logic.algorithms.dpll import dpll, dpll_satisfiable, \ + find_pure_symbol, find_unit_clause, unit_propagate, \ + find_pure_symbol_int_repr, find_unit_clause_int_repr, \ + unit_propagate_int_repr +from sympy.logic.algorithms.dpll2 import dpll_satisfiable as dpll2_satisfiable + +from sympy.logic.algorithms.z3_wrapper import z3_satisfiable +from sympy.assumptions.cnf import CNF, EncodedCNF +from sympy.logic.tests.test_lra_theory import make_random_problem +from sympy.core.random import randint + +from sympy.testing.pytest import raises, skip +from sympy.external import import_module + + +def test_literal(): + A, B = symbols('A,B') + assert literal_symbol(True) is True + assert literal_symbol(False) is False + assert literal_symbol(A) is A + assert literal_symbol(~A) is A + + +def test_find_pure_symbol(): + A, B, C = symbols('A,B,C') + 
assert find_pure_symbol([A], [A]) == (A, True) + assert find_pure_symbol([A, B], [~A | B, ~B | A]) == (None, None) + assert find_pure_symbol([A, B, C], [ A | ~B, ~B | ~C, C | A]) == (A, True) + assert find_pure_symbol([A, B, C], [~A | B, B | ~C, C | A]) == (B, True) + assert find_pure_symbol([A, B, C], [~A | ~B, ~B | ~C, C | A]) == (B, False) + assert find_pure_symbol( + [A, B, C], [~A | B, ~B | ~C, C | A]) == (None, None) + + +def test_find_pure_symbol_int_repr(): + assert find_pure_symbol_int_repr([1], [{1}]) == (1, True) + assert find_pure_symbol_int_repr([1, 2], + [{-1, 2}, {-2, 1}]) == (None, None) + assert find_pure_symbol_int_repr([1, 2, 3], + [{1, -2}, {-2, -3}, {3, 1}]) == (1, True) + assert find_pure_symbol_int_repr([1, 2, 3], + [{-1, 2}, {2, -3}, {3, 1}]) == (2, True) + assert find_pure_symbol_int_repr([1, 2, 3], + [{-1, -2}, {-2, -3}, {3, 1}]) == (2, False) + assert find_pure_symbol_int_repr([1, 2, 3], + [{-1, 2}, {-2, -3}, {3, 1}]) == (None, None) + + +def test_unit_clause(): + A, B, C = symbols('A,B,C') + assert find_unit_clause([A], {}) == (A, True) + assert find_unit_clause([A, ~A], {}) == (A, True) # Wrong ?? 
+ assert find_unit_clause([A | B], {A: True}) == (B, True) + assert find_unit_clause([A | B], {B: True}) == (A, True) + assert find_unit_clause( + [A | B | C, B | ~C, A | ~B], {A: True}) == (B, False) + assert find_unit_clause([A | B | C, B | ~C, A | B], {A: True}) == (B, True) + assert find_unit_clause([A | B | C, B | ~C, A ], {}) == (A, True) + + +def test_unit_clause_int_repr(): + assert find_unit_clause_int_repr(map(set, [[1]]), {}) == (1, True) + assert find_unit_clause_int_repr(map(set, [[1], [-1]]), {}) == (1, True) + assert find_unit_clause_int_repr([{1, 2}], {1: True}) == (2, True) + assert find_unit_clause_int_repr([{1, 2}], {2: True}) == (1, True) + assert find_unit_clause_int_repr(map(set, + [[1, 2, 3], [2, -3], [1, -2]]), {1: True}) == (2, False) + assert find_unit_clause_int_repr(map(set, + [[1, 2, 3], [3, -3], [1, 2]]), {1: True}) == (2, True) + + A, B, C = symbols('A,B,C') + assert find_unit_clause([A | B | C, B | ~C, A ], {}) == (A, True) + + +def test_unit_propagate(): + A, B, C = symbols('A,B,C') + assert unit_propagate([A | B], A) == [] + assert unit_propagate([A | B, ~A | C, ~C | B, A], A) == [C, ~C | B, A] + + +def test_unit_propagate_int_repr(): + assert unit_propagate_int_repr([{1, 2}], 1) == [] + assert unit_propagate_int_repr(map(set, + [[1, 2], [-1, 3], [-3, 2], [1]]), 1) == [{3}, {-3, 2}] + + +def test_dpll(): + """This is also tested in test_dimacs""" + A, B, C = symbols('A,B,C') + assert dpll([A | B], [A, B], {A: True, B: True}) == {A: True, B: True} + + +def test_dpll_satisfiable(): + A, B, C = symbols('A,B,C') + assert dpll_satisfiable( A & ~A ) is False + assert dpll_satisfiable( A & ~B ) == {A: True, B: False} + assert dpll_satisfiable( + A | B ) in ({A: True}, {B: True}, {A: True, B: True}) + assert dpll_satisfiable( + (~A | B) & (~B | A) ) in ({A: True, B: True}, {A: False, B: False}) + assert dpll_satisfiable( (A | B) & (~B | C) ) in ({A: True, B: False}, + {A: True, C: True}, {B: True, C: True}) + assert dpll_satisfiable( A & B 
& C ) == {A: True, B: True, C: True} + assert dpll_satisfiable( (A | B) & (A >> B) ) == {B: True} + assert dpll_satisfiable( Equivalent(A, B) & A ) == {A: True, B: True} + assert dpll_satisfiable( Equivalent(A, B) & ~A ) == {A: False, B: False} + + +def test_dpll2_satisfiable(): + A, B, C = symbols('A,B,C') + assert dpll2_satisfiable( A & ~A ) is False + assert dpll2_satisfiable( A & ~B ) == {A: True, B: False} + assert dpll2_satisfiable( + A | B ) in ({A: True}, {B: True}, {A: True, B: True}) + assert dpll2_satisfiable( + (~A | B) & (~B | A) ) in ({A: True, B: True}, {A: False, B: False}) + assert dpll2_satisfiable( (A | B) & (~B | C) ) in ({A: True, B: False, C: True}, + {A: True, B: True, C: True}) + assert dpll2_satisfiable( A & B & C ) == {A: True, B: True, C: True} + assert dpll2_satisfiable( (A | B) & (A >> B) ) in ({B: True, A: False}, + {B: True, A: True}) + assert dpll2_satisfiable( Equivalent(A, B) & A ) == {A: True, B: True} + assert dpll2_satisfiable( Equivalent(A, B) & ~A ) == {A: False, B: False} + + +def test_minisat22_satisfiable(): + A, B, C = symbols('A,B,C') + minisat22_satisfiable = lambda expr: satisfiable(expr, algorithm="minisat22") + assert minisat22_satisfiable( A & ~A ) is False + assert minisat22_satisfiable( A & ~B ) == {A: True, B: False} + assert minisat22_satisfiable( + A | B ) in ({A: True}, {B: False}, {A: False, B: True}, {A: True, B: True}, {A: True, B: False}) + assert minisat22_satisfiable( + (~A | B) & (~B | A) ) in ({A: True, B: True}, {A: False, B: False}) + assert minisat22_satisfiable( (A | B) & (~B | C) ) in ({A: True, B: False, C: True}, + {A: True, B: True, C: True}, {A: False, B: True, C: True}, {A: True, B: False, C: False}) + assert minisat22_satisfiable( A & B & C ) == {A: True, B: True, C: True} + assert minisat22_satisfiable( (A | B) & (A >> B) ) in ({B: True, A: False}, + {B: True, A: True}) + assert minisat22_satisfiable( Equivalent(A, B) & A ) == {A: True, B: True} + assert minisat22_satisfiable( Equivalent(A, 
B) & ~A ) == {A: False, B: False} + +def test_minisat22_minimal_satisfiable(): + A, B, C = symbols('A,B,C') + minisat22_satisfiable = lambda expr, minimal=True: satisfiable(expr, algorithm="minisat22", minimal=True) + assert minisat22_satisfiable( A & ~A ) is False + assert minisat22_satisfiable( A & ~B ) == {A: True, B: False} + assert minisat22_satisfiable( + A | B ) in ({A: True}, {B: False}, {A: False, B: True}, {A: True, B: True}, {A: True, B: False}) + assert minisat22_satisfiable( + (~A | B) & (~B | A) ) in ({A: True, B: True}, {A: False, B: False}) + assert minisat22_satisfiable( (A | B) & (~B | C) ) in ({A: True, B: False, C: True}, + {A: True, B: True, C: True}, {A: False, B: True, C: True}, {A: True, B: False, C: False}) + assert minisat22_satisfiable( A & B & C ) == {A: True, B: True, C: True} + assert minisat22_satisfiable( (A | B) & (A >> B) ) in ({B: True, A: False}, + {B: True, A: True}) + assert minisat22_satisfiable( Equivalent(A, B) & A ) == {A: True, B: True} + assert minisat22_satisfiable( Equivalent(A, B) & ~A ) == {A: False, B: False} + g = satisfiable((A | B | C),algorithm="minisat22",minimal=True,all_models=True) + sol = next(g) + first_solution = {key for key, value in sol.items() if value} + sol=next(g) + second_solution = {key for key, value in sol.items() if value} + sol=next(g) + third_solution = {key for key, value in sol.items() if value} + assert not first_solution <= second_solution + assert not second_solution <= third_solution + assert not first_solution <= third_solution + +def test_satisfiable(): + A, B, C = symbols('A,B,C') + assert satisfiable(A & (A >> B) & ~B) is False + + +def test_valid(): + A, B, C = symbols('A,B,C') + assert valid(A >> (B >> A)) is True + assert valid((A >> (B >> C)) >> ((A >> B) >> (A >> C))) is True + assert valid((~B >> ~A) >> (A >> B)) is True + assert valid(A | B | C) is False + assert valid(A >> B) is False + + +def test_pl_true(): + A, B, C = symbols('A,B,C') + assert pl_true(True) is True + 
assert pl_true( A & B, {A: True, B: True}) is True + assert pl_true( A | B, {A: True}) is True + assert pl_true( A | B, {B: True}) is True + assert pl_true( A | B, {A: None, B: True}) is True + assert pl_true( A >> B, {A: False}) is True + assert pl_true( A | B | ~C, {A: False, B: True, C: True}) is True + assert pl_true(Equivalent(A, B), {A: False, B: False}) is True + + # test for false + assert pl_true(False) is False + assert pl_true( A & B, {A: False, B: False}) is False + assert pl_true( A & B, {A: False}) is False + assert pl_true( A & B, {B: False}) is False + assert pl_true( A | B, {A: False, B: False}) is False + + #test for None + assert pl_true(B, {B: None}) is None + assert pl_true( A & B, {A: True, B: None}) is None + assert pl_true( A >> B, {A: True, B: None}) is None + assert pl_true(Equivalent(A, B), {A: None}) is None + assert pl_true(Equivalent(A, B), {A: True, B: None}) is None + + # Test for deep + assert pl_true(A | B, {A: False}, deep=True) is None + assert pl_true(~A & ~B, {A: False}, deep=True) is None + assert pl_true(A | B, {A: False, B: False}, deep=True) is False + assert pl_true(A & B & (~A | ~B), {A: True}, deep=True) is False + assert pl_true((C >> A) >> (B >> A), {C: True}, deep=True) is True + + +def test_pl_true_wrong_input(): + from sympy.core.numbers import pi + raises(ValueError, lambda: pl_true('John Cleese')) + raises(ValueError, lambda: pl_true(42 + pi + pi ** 2)) + raises(ValueError, lambda: pl_true(42)) + + +def test_entails(): + A, B, C = symbols('A, B, C') + assert entails(A, [A >> B, ~B]) is False + assert entails(B, [Equivalent(A, B), A]) is True + assert entails((A >> B) >> (~A >> ~B)) is False + assert entails((A >> B) >> (~B >> ~A)) is True + + +def test_PropKB(): + A, B, C = symbols('A,B,C') + kb = PropKB() + assert kb.ask(A >> B) is False + assert kb.ask(A >> (B >> A)) is True + kb.tell(A >> B) + kb.tell(B >> C) + assert kb.ask(A) is False + assert kb.ask(B) is False + assert kb.ask(C) is False + assert kb.ask(~A) 
is False + assert kb.ask(~B) is False + assert kb.ask(~C) is False + assert kb.ask(A >> C) is True + kb.tell(A) + assert kb.ask(A) is True + assert kb.ask(B) is True + assert kb.ask(C) is True + assert kb.ask(~C) is False + kb.retract(A) + assert kb.ask(C) is False + + +def test_propKB_tolerant(): + """"tolerant to bad input""" + kb = PropKB() + A, B, C = symbols('A,B,C') + assert kb.ask(B) is False + +def test_satisfiable_non_symbols(): + x, y = symbols('x y') + assumptions = Q.zero(x*y) + facts = Implies(Q.zero(x*y), Q.zero(x) | Q.zero(y)) + query = ~Q.zero(x) & ~Q.zero(y) + refutations = [ + {Q.zero(x): True, Q.zero(x*y): True}, + {Q.zero(y): True, Q.zero(x*y): True}, + {Q.zero(x): True, Q.zero(y): True, Q.zero(x*y): True}, + {Q.zero(x): True, Q.zero(y): False, Q.zero(x*y): True}, + {Q.zero(x): False, Q.zero(y): True, Q.zero(x*y): True}] + assert not satisfiable(And(assumptions, facts, query), algorithm='dpll') + assert satisfiable(And(assumptions, facts, ~query), algorithm='dpll') in refutations + assert not satisfiable(And(assumptions, facts, query), algorithm='dpll2') + assert satisfiable(And(assumptions, facts, ~query), algorithm='dpll2') in refutations + +def test_satisfiable_bool(): + from sympy.core.singleton import S + assert satisfiable(true) == {true: true} + assert satisfiable(S.true) == {true: true} + assert satisfiable(false) is False + assert satisfiable(S.false) is False + + +def test_satisfiable_all_models(): + from sympy.abc import A, B + assert next(satisfiable(False, all_models=True)) is False + assert list(satisfiable((A >> ~A) & A, all_models=True)) == [False] + assert list(satisfiable(True, all_models=True)) == [{true: true}] + + models = [{A: True, B: False}, {A: False, B: True}] + result = satisfiable(A ^ B, all_models=True) + models.remove(next(result)) + models.remove(next(result)) + raises(StopIteration, lambda: next(result)) + assert not models + + assert list(satisfiable(Equivalent(A, B), all_models=True)) == \ + [{A: False, B: 
False}, {A: True, B: True}] + + models = [{A: False, B: False}, {A: False, B: True}, {A: True, B: True}] + for model in satisfiable(A >> B, all_models=True): + models.remove(model) + assert not models + + # This is a santiy test to check that only the required number + # of solutions are generated. The expr below has 2**100 - 1 models + # which would time out the test if all are generated at once. + from sympy.utilities.iterables import numbered_symbols + from sympy.logic.boolalg import Or + sym = numbered_symbols() + X = [next(sym) for i in range(100)] + result = satisfiable(Or(*X), all_models=True) + for i in range(10): + assert next(result) + + +def test_z3(): + z3 = import_module("z3") + + if not z3: + skip("z3 not installed.") + A, B, C = symbols('A,B,C') + x, y, z = symbols('x,y,z') + assert z3_satisfiable((x >= 2) & (x < 1)) is False + assert z3_satisfiable( A & ~A ) is False + + model = z3_satisfiable(A & (~A | B | C)) + assert bool(model) is True + assert model[A] is True + + # test nonlinear function + assert z3_satisfiable((x ** 2 >= 2) & (x < 1) & (x > -1)) is False + + +def test_z3_vs_lra_dpll2(): + z3 = import_module("z3") + if z3 is None: + skip("z3 not installed.") + + def boolean_formula_to_encoded_cnf(bf): + cnf = CNF.from_prop(bf) + enc = EncodedCNF() + enc.from_cnf(cnf) + return enc + + def make_random_cnf(num_clauses=5, num_constraints=10, num_var=2): + assert num_clauses <= num_constraints + constraints = make_random_problem(num_variables=num_var, num_constraints=num_constraints, rational=False) + clauses = [[cons] for cons in constraints[:num_clauses]] + for cons in constraints[num_clauses:]: + if isinstance(cons, Unequality): + cons = ~cons + i = randint(0, num_clauses-1) + clauses[i].append(cons) + + clauses = [Or(*clause) for clause in clauses] + cnf = And(*clauses) + return boolean_formula_to_encoded_cnf(cnf) + + lra_dpll2_satisfiable = lambda x: dpll2_satisfiable(x, use_lra_theory=True) + + for _ in range(50): + cnf = 
make_random_cnf(num_clauses=10, num_constraints=15, num_var=2) + + try: + z3_sat = z3_satisfiable(cnf) + except z3.z3types.Z3Exception: + continue + + lra_dpll2_sat = lra_dpll2_satisfiable(cnf) is not False + + assert z3_sat == lra_dpll2_sat diff --git a/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_lra_theory.py b/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_lra_theory.py new file mode 100644 index 0000000000000000000000000000000000000000..207a3c5ba2c1b16ee5323382deee0863a5dfb595 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/logic/tests/test_lra_theory.py @@ -0,0 +1,440 @@ +from sympy.core.numbers import Rational, I, oo +from sympy.core.relational import Eq +from sympy.core.symbol import symbols +from sympy.core.singleton import S +from sympy.matrices.dense import Matrix +from sympy.matrices.dense import randMatrix +from sympy.assumptions.ask import Q +from sympy.logic.boolalg import And +from sympy.abc import x, y, z +from sympy.assumptions.cnf import CNF, EncodedCNF +from sympy.functions.elementary.trigonometric import cos +from sympy.external import import_module + +from sympy.logic.algorithms.lra_theory import LRASolver, UnhandledInput, LRARational, HANDLE_NEGATION +from sympy.core.random import random, choice, randint +from sympy.core.sympify import sympify +from sympy.ntheory.generate import randprime +from sympy.core.relational import StrictLessThan, StrictGreaterThan +import itertools + +from sympy.testing.pytest import raises, XFAIL, skip + +def make_random_problem(num_variables=2, num_constraints=2, sparsity=.1, rational=True, + disable_strict = False, disable_nonstrict=False, disable_equality=False): + def rand(sparsity=sparsity): + if random() < sparsity: + return sympify(0) + if rational: + int1, int2 = [randprime(0, 50) for _ in range(2)] + return Rational(int1, int2) * choice([-1, 1]) + else: + return randint(1, 10) * choice([-1, 1]) + + variables = symbols('x1:%s' % (num_variables + 1)) + constraints = [] + for 
_ in range(num_constraints): + lhs, rhs = sum(rand() * x for x in variables), rand(sparsity=0) # sparsity=0 bc of bug with smtlib_code + options = [] + if not disable_equality: + options += [Eq(lhs, rhs)] + if not disable_nonstrict: + options += [lhs <= rhs, lhs >= rhs] + if not disable_strict: + options += [lhs < rhs, lhs > rhs] + + constraints.append(choice(options)) + + return constraints + +def check_if_satisfiable_with_z3(constraints): + from sympy.external.importtools import import_module + from sympy.printing.smtlib import smtlib_code + from sympy.logic.boolalg import And + boolean_formula = And(*constraints) + z3 = import_module("z3") + if z3: + smtlib_string = smtlib_code(boolean_formula) + s = z3.Solver() + s.from_string(smtlib_string) + res = str(s.check()) + if res == 'sat': + return True + elif res == 'unsat': + return False + else: + raise ValueError(f"z3 was not able to check the satisfiability of {boolean_formula}") + +def find_rational_assignment(constr, assignment, iter=20): + eps = sympify(1) + + for _ in range(iter): + assign = {key: val[0] + val[1]*eps for key, val in assignment.items()} + try: + for cons in constr: + assert cons.subs(assign) == True + return assign + except AssertionError: + eps = eps/2 + + return None + +def boolean_formula_to_encoded_cnf(bf): + cnf = CNF.from_prop(bf) + enc = EncodedCNF() + enc.from_cnf(cnf) + return enc + + +def test_from_encoded_cnf(): + s1, s2 = symbols("s1 s2") + + # Test preprocessing + # Example is from section 3 of paper. 
+ phi = (x >= 0) & ((x + y <= 2) | (x + 2 * y - z >= 6)) & (Eq(x + y, 2) | (x + 2 * y - z > 4)) + enc = boolean_formula_to_encoded_cnf(phi) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + assert lra.A.shape == (2, 5) + assert str(lra.slack) == '[_s1, _s2]' + assert str(lra.nonslack) == '[x, y, z]' + assert lra.A == Matrix([[ 1, 1, 0, -1, 0], + [-1, -2, 1, 0, -1]]) + assert {(str(b.var), b.bound, b.upper, b.equality, b.strict) for b in lra.enc_to_boundary.values()} == {('_s1', 2, None, True, False), + ('_s1', 2, True, False, False), + ('_s2', -4, True, False, True), + ('_s2', -6, True, False, False), + ('x', 0, False, False, False)} + + +def test_problem(): + from sympy.logic.algorithms.lra_theory import LRASolver + from sympy.assumptions.cnf import CNF, EncodedCNF + cons = [-2 * x - 2 * y >= 7, -9 * y >= 7, -6 * y >= 5] + cnf = CNF().from_prop(And(*cons)) + enc = EncodedCNF() + enc.from_cnf(cnf) + lra, _ = LRASolver.from_encoded_cnf(enc) + lra.assert_lit(1) + lra.assert_lit(2) + lra.assert_lit(3) + is_sat, assignment = lra.check() + assert is_sat is True + + +def test_random_problems(): + z3 = import_module("z3") + if z3 is None: + skip("z3 is not installed") + + special_cases = []; x1, x2, x3 = symbols("x1 x2 x3") + special_cases.append([x1 - 3 * x2 <= -5, 6 * x1 + 4 * x2 <= 0, -7 * x1 + 3 * x2 <= 3]) + special_cases.append([-3 * x1 >= 3, Eq(4 * x1, -1)]) + special_cases.append([-4 * x1 < 4, 6 * x1 <= -6]) + special_cases.append([-3 * x2 >= 7, 6 * x1 <= -5, -3 * x2 <= -4]) + special_cases.append([x + y >= 2, x + y <= 1]) + special_cases.append([x >= 0, x + y <= 2, x + 2 * y - z >= 6]) # from paper example + special_cases.append([-2 * x1 - 2 * x2 >= 7, -9 * x1 >= 7, -6 * x1 >= 5]) + special_cases.append([2 * x1 > -3, -9 * x1 < -6, 9 * x1 <= 6]) + special_cases.append([-2*x1 < -4, 9*x1 > -9]) + special_cases.append([-6*x1 >= -1, -8*x1 + x2 >= 5, -8*x1 + 7*x2 < 4, x1 > 7]) + special_cases.append([Eq(x1, 2), Eq(5*x1, -2), Eq(-7*x2, -6), Eq(9*x1 + 
10*x2, 9)]) + special_cases.append([Eq(3*x1, 6), Eq(x1 - 8*x2, -9), Eq(-7*x1 + 5*x2, 3), Eq(3*x2, 7)]) + special_cases.append([-4*x1 < 4, 6*x1 <= -6]) + special_cases.append([-3*x1 + 8*x2 >= -8, -10*x2 > 9, 8*x1 - 4*x2 < 8, 10*x1 - 9*x2 >= -9]) + special_cases.append([x1 + 5*x2 >= -6, 9*x1 - 3*x2 >= -9, 6*x1 + 6*x2 < -10, -3*x1 + 3*x2 < -7]) + special_cases.append([-9*x1 < 7, -5*x1 - 7*x2 < -1, 3*x1 + 7*x2 > 1, -6*x1 - 6*x2 > 9]) + special_cases.append([9*x1 - 6*x2 >= -7, 9*x1 + 4*x2 < -8, -7*x2 <= 1, 10*x2 <= -7]) + + feasible_count = 0 + for i in range(50): + if i % 8 == 0: + constraints = make_random_problem(num_variables=1, num_constraints=2, rational=False) + elif i % 8 == 1: + constraints = make_random_problem(num_variables=2, num_constraints=4, rational=False, disable_equality=True, + disable_nonstrict=True) + elif i % 8 == 2: + constraints = make_random_problem(num_variables=2, num_constraints=4, rational=False, disable_strict=True) + elif i % 8 == 3: + constraints = make_random_problem(num_variables=3, num_constraints=12, rational=False) + else: + constraints = make_random_problem(num_variables=3, num_constraints=6, rational=False) + + if i < len(special_cases): + constraints = special_cases[i] + + if False in constraints or True in constraints: + continue + + phi = And(*constraints) + if phi == False: + continue + cnf = CNF.from_prop(phi); enc = EncodedCNF() + enc.from_cnf(cnf) + assert all(0 not in clause for clause in enc.data) + + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + s_subs = lra.s_subs + + lra.run_checks = True + s_subs_rev = {value: key for key, value in s_subs.items()} + lits = {lit for clause in enc.data for lit in clause} + + bounds = [(lra.enc_to_boundary[l], l) for l in lits if l in lra.enc_to_boundary] + bounds = sorted(bounds, key=lambda x: (str(x[0].var), x[0].bound, str(x[0].upper))) # to remove nondeterminism + + for b, l in bounds: + if lra.result and lra.result[0] == False: + break + lra.assert_lit(l) + + feasible 
= lra.check() + + if feasible[0] == True: + feasible_count += 1 + assert check_if_satisfiable_with_z3(constraints) is True + cons_funcs = [cons.func for cons in constraints] + assignment = feasible[1] + assignment = {key.var : value for key, value in assignment.items()} + if not (StrictLessThan in cons_funcs or StrictGreaterThan in cons_funcs): + assignment = {key: value[0] for key, value in assignment.items()} + for cons in constraints: + assert cons.subs(assignment) == True + + else: + rat_assignment = find_rational_assignment(constraints, assignment) + assert rat_assignment is not None + else: + assert check_if_satisfiable_with_z3(constraints) is False + + conflict = feasible[1] + assert len(conflict) >= 2 + conflict = {lra.enc_to_boundary[-l].get_inequality() for l in conflict} + conflict = {clause.subs(s_subs_rev) for clause in conflict} + assert check_if_satisfiable_with_z3(conflict) is False + + # check that conflict clause is probably minimal + for subset in itertools.combinations(conflict, len(conflict)-1): + assert check_if_satisfiable_with_z3(subset) is True + + +@XFAIL +def test_pos_neg_zero(): + bf = Q.positive(x) & Q.negative(x) & Q.zero(y) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for lit in enc.encoding.values(): + if lra.assert_lit(lit) is not None: + break + assert len(lra.enc_to_boundary) == 3 + assert lra.check()[0] == False + + bf = Q.positive(x) & Q.lt(x, -1) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for lit in enc.encoding.values(): + if lra.assert_lit(lit) is not None: + break + assert len(lra.enc_to_boundary) == 2 + assert lra.check()[0] == False + + bf = Q.positive(x) & Q.zero(x) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for lit in enc.encoding.values(): + if lra.assert_lit(lit) is not None: + break + assert len(lra.enc_to_boundary) == 2 + assert 
lra.check()[0] == False + + bf = Q.positive(x) & Q.zero(y) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for lit in enc.encoding.values(): + if lra.assert_lit(lit) is not None: + break + assert len(lra.enc_to_boundary) == 2 + assert lra.check()[0] == True + + +@XFAIL +def test_pos_neg_infinite(): + bf = Q.positive_infinite(x) & Q.lt(x, 10000000) & Q.positive_infinite(y) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for lit in enc.encoding.values(): + if lra.assert_lit(lit) is not None: + break + assert len(lra.enc_to_boundary) == 3 + assert lra.check()[0] == False + + bf = Q.positive_infinite(x) & Q.gt(x, 10000000) & Q.positive_infinite(y) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for lit in enc.encoding.values(): + if lra.assert_lit(lit) is not None: + break + assert len(lra.enc_to_boundary) == 3 + assert lra.check()[0] == True + + bf = Q.positive_infinite(x) & Q.negative_infinite(x) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for lit in enc.encoding.values(): + if lra.assert_lit(lit) is not None: + break + assert len(lra.enc_to_boundary) == 2 + assert lra.check()[0] == False + + +def test_binrel_evaluation(): + bf = Q.gt(3, 2) + enc = boolean_formula_to_encoded_cnf(bf) + lra, conflicts = LRASolver.from_encoded_cnf(enc, testing_mode=True) + assert len(lra.enc_to_boundary) == 0 + assert conflicts == [[1]] + + bf = Q.lt(3, 2) + enc = boolean_formula_to_encoded_cnf(bf) + lra, conflicts = LRASolver.from_encoded_cnf(enc, testing_mode=True) + assert len(lra.enc_to_boundary) == 0 + assert conflicts == [[-1]] + + +def test_negation(): + assert HANDLE_NEGATION is True + bf = Q.gt(x, 1) & ~Q.gt(x, 0) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for clause in enc.data: 
+ for lit in clause: + lra.assert_lit(lit) + assert len(lra.enc_to_boundary) == 2 + assert lra.check()[0] == False + assert sorted(lra.check()[1]) in [[-1, 2], [-2, 1]] + + bf = ~Q.gt(x, 1) & ~Q.lt(x, 0) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for clause in enc.data: + for lit in clause: + lra.assert_lit(lit) + assert len(lra.enc_to_boundary) == 2 + assert lra.check()[0] == True + + bf = ~Q.gt(x, 0) & ~Q.lt(x, 1) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for clause in enc.data: + for lit in clause: + lra.assert_lit(lit) + assert len(lra.enc_to_boundary) == 2 + assert lra.check()[0] == False + + bf = ~Q.gt(x, 0) & ~Q.le(x, 0) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for clause in enc.data: + for lit in clause: + lra.assert_lit(lit) + assert len(lra.enc_to_boundary) == 2 + assert lra.check()[0] == False + + bf = ~Q.le(x+y, 2) & ~Q.ge(x-y, 2) & ~Q.ge(y, 0) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for clause in enc.data: + for lit in clause: + lra.assert_lit(lit) + assert len(lra.enc_to_boundary) == 3 + assert lra.check()[0] == False + assert len(lra.check()[1]) == 3 + assert all(i > 0 for i in lra.check()[1]) + + +def test_unhandled_input(): + nan = S.NaN + bf = Q.gt(3, nan) & Q.gt(x, nan) + enc = boolean_formula_to_encoded_cnf(bf) + raises(ValueError, lambda: LRASolver.from_encoded_cnf(enc, testing_mode=True)) + + bf = Q.gt(3, I) & Q.gt(x, I) + enc = boolean_formula_to_encoded_cnf(bf) + raises(UnhandledInput, lambda: LRASolver.from_encoded_cnf(enc, testing_mode=True)) + + bf = Q.gt(3, float("inf")) & Q.gt(x, float("inf")) + enc = boolean_formula_to_encoded_cnf(bf) + raises(UnhandledInput, lambda: LRASolver.from_encoded_cnf(enc, testing_mode=True)) + + bf = Q.gt(3, oo) & Q.gt(x, oo) + enc = 
boolean_formula_to_encoded_cnf(bf) + raises(UnhandledInput, lambda: LRASolver.from_encoded_cnf(enc, testing_mode=True)) + + # test non-linearity + bf = Q.gt(x**2 + x, 2) + enc = boolean_formula_to_encoded_cnf(bf) + raises(UnhandledInput, lambda: LRASolver.from_encoded_cnf(enc, testing_mode=True)) + + bf = Q.gt(cos(x) + x, 2) + enc = boolean_formula_to_encoded_cnf(bf) + raises(UnhandledInput, lambda: LRASolver.from_encoded_cnf(enc, testing_mode=True)) + +@XFAIL +def test_infinite_strict_inequalities(): + # Extensive testing of the interaction between strict inequalities + # and constraints containing infinity is needed because + # the paper's rule for strict inequalities don't work when + # infinite numbers are allowed. Using the paper's rules you + # can end up with situations where oo + delta > oo is considered + # True when oo + delta should be equal to oo. + # See https://math.stackexchange.com/questions/4757069/can-this-method-of-converting-strict-inequalities-to-equisatisfiable-nonstrict-i + bf = (-x - y >= -float("inf")) & (x > 0) & (y >= float("inf")) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for lit in sorted(enc.encoding.values()): + if lra.assert_lit(lit) is not None: + break + assert len(lra.enc_to_boundary) == 3 + assert lra.check()[0] == True + + +def test_pivot(): + for _ in range(10): + m = randMatrix(5) + rref = m.rref() + for _ in range(5): + i, j = randint(0, 4), randint(0, 4) + if m[i, j] != 0: + assert LRASolver._pivot(m, i, j).rref() == rref + + +def test_reset_bounds(): + bf = Q.ge(x, 1) & Q.lt(x, 1) + enc = boolean_formula_to_encoded_cnf(bf) + lra, _ = LRASolver.from_encoded_cnf(enc, testing_mode=True) + for clause in enc.data: + for lit in clause: + lra.assert_lit(lit) + assert len(lra.enc_to_boundary) == 2 + assert lra.check()[0] == False + + lra.reset_bounds() + assert lra.check()[0] == True + for var in lra.all_var: + assert var.upper == LRARational(float("inf"), 0) + assert 
var.upper_from_eq == False + assert var.upper_from_neg == False + assert var.lower == LRARational(-float("inf"), 0) + assert var.lower_from_eq == False + assert var.lower_from_neg == False + assert var.assign == LRARational(0, 0) + assert var.var is not None + assert var.col_idx is not None + + +def test_empty_cnf(): + cnf = CNF() + enc = EncodedCNF() + enc.from_cnf(cnf) + lra, conflict = LRASolver.from_encoded_cnf(enc) + assert len(conflict) == 0 + assert lra.check() == (True, {}) diff --git a/vllm/lib/python3.10/site-packages/sympy/plotting/tests/test_region_not.png b/vllm/lib/python3.10/site-packages/sympy/plotting/tests/test_region_not.png new file mode 100644 index 0000000000000000000000000000000000000000..51c241e9825c28047cdd31fd64020ecb956a19e4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/plotting/tests/test_region_not.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dceffdfe73d6d78f453142c4713e51a88dbe9361f79c710b6df2400edd9c3bc9 +size 7939 diff --git a/vllm/lib/python3.10/site-packages/sympy/plotting/tests/test_region_or.png b/vllm/lib/python3.10/site-packages/sympy/plotting/tests/test_region_or.png new file mode 100644 index 0000000000000000000000000000000000000000..7f9cc7cf23bec219b8d6101c4cbae235a2c678d1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/plotting/tests/test_region_or.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e41ba0d3dbf2a20f82bb79a4cbba5bb458dec396ccbdba5ed195d6b200ca7f2e +size 8809 diff --git a/vllm/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png b/vllm/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png new file mode 100644 index 0000000000000000000000000000000000000000..cafdc56f650a8c4d7af38fdfd8206891aa9d6cc2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:92e71558103d03df0ea5c47876277968b5d4ca8ab8cf43b80b73cce9d962052c +size 10002 diff --git a/vllm/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb8f59083cd2ba7d554f96dfe1ff833be7203bd1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:198c1f6500eb98c28c7685a6757982fb500967dff6cae320aab4215df81b67b1 +size 154761 diff --git a/vllm/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6812c795301d21d530ec986d2e2718361d7edbe9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74a9266e3935389d170cb9b6cd7cd00a5e5379e296f65dbfa3bcc932322ae062 +size 112032