diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a40f8edfc1397152c931fc33f076f8f658621373
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Float_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _cast_Float(const at::Tensor & self, bool non_blocking=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Int_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Int_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..788a1b98bf54574b74149054599fcf310d1bfa67
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Int_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _cast_Int(const at::Tensor & self, bool non_blocking=false);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f15e3eec08f61512219fd9ef918518b64a7e7808
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _convert_indices_from_csr_to_coo(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false);
+TORCH_API at::Tensor & _convert_indices_from_csr_to_coo_out(at::Tensor & out, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false);
+TORCH_API at::Tensor & _convert_indices_from_csr_to_coo_outf(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack.h
new file mode 100644
index 0000000000000000000000000000000000000000..27316442a3b3c6a2b5516e15755746e881f6b74a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_convert_weight_to_int4pack.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_convert_weight_to_int4pack_ops.h>
+
+namespace at {
+
+
+// aten::_convert_weight_to_int4pack(Tensor self, int innerKTiles) -> Tensor
+inline at::Tensor _convert_weight_to_int4pack(const at::Tensor & self, int64_t innerKTiles) {
+    return at::_ops::_convert_weight_to_int4pack::call(self, innerKTiles);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_ctc_loss_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_ctc_loss_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1ec555a6ab922737630fc9c9ee1aaf43758183b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_ctc_loss_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, bool zero_infinity=false);
+
+} // namespace cuda
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss.h
new file mode 100644
index 0000000000000000000000000000000000000000..6507d55c950d3a5da807e3435ebd3605123b5a87
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cudnn_ctc_loss_ops.h>
+
+namespace at {
+
+
+// aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
+    return at::_ops::_cudnn_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
+}
+
+// aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
+    return at::_ops::_cudnn_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
+}
+
+// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
+    return at::_ops::_cudnn_ctc_loss_out::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1);
+}
+// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_outf(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::_cudnn_ctc_loss_out::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_rnn_backward_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_rnn_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7d9c1280be606d46f0dca7a109c1695decbf4963
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_rnn_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _cudnn_rnn_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);
+TORCH_API void _cudnn_rnn_backward_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3);
+TORCH_API void _cudnn_rnn_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);
+TORCH_API void _cudnn_rnn_backward_symint_outf(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, const at::Tensor & output, const ::std::optional<at::Tensor> & grad_output, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size.h
new file mode 100644
index 0000000000000000000000000000000000000000..c8a600b67561259f9fe4a430f4f8ff21ff5facde
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cufft_get_plan_cache_size_ops.h>
+
+namespace at {
+
+
+// aten::_cufft_get_plan_cache_size(DeviceIndex device_index) -> int
+inline int64_t _cufft_get_plan_cache_size(at::DeviceIndex device_index) {
+    return at::_ops::_cufft_get_plan_cache_size::call(device_index);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_embedding_bag_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_embedding_bag_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7ef5905ad4b059155f5acde936731182e397fcb6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_embedding_bag_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const ::std::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_add_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_add_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..acb46d74bfc2dbb7bc024d803d47e7ed1134a25b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_add_ops.h
@@ -0,0 +1,149 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_add_Scalar {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Scalar & scalar);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_add__Scalar {
+  using schema = void (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_add_List {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+};
+
+struct TORCH_API _foreach_add__List {
+  using schema = void (at::TensorList, at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()")
+  static void call(at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+};
+
+struct TORCH_API _foreach_add_ScalarList {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_add__ScalarList {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_add_Tensor {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha);
+};
+
+struct TORCH_API _foreach_add__Tensor {
+  using schema = void (at::TensorList, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> ()")
+  static void call(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha);
+};
+
+struct TORCH_API _foreach_add_Scalar_out {
+  using schema = void (at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+};
+
+struct TORCH_API _foreach_add_List_out {
+  using schema = void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out);
+};
+
+struct TORCH_API _foreach_add_ScalarList_out {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+};
+
+struct TORCH_API _foreach_add_Tensor_out {
+  using schema = void (at::TensorList, const at::Tensor &, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_add")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_cos.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_cos.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a95bb778450899e83badd0f4522ea752207ebee
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_cos.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_cos_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_cos(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_cos(at::TensorList self) {
+    return at::_ops::_foreach_cos::call(self);
+}
+
+// aten::_foreach_cos_(Tensor(a!)[] self) -> ()
+inline void _foreach_cos_(at::TensorList self) {
+    return at::_ops::_foreach_cos_::call(self);
+}
+
+// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_cos_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_cos_out::call(self, out);
+}
+// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_cos_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_cos_out::call(self, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_sin_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_sin_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3d6c5d3515c9bc8f66a412bdb689e03e0cb445a2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_sin_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_sin(at::TensorList self);
+TORCH_API void _foreach_sin_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_sin_outf(at::TensorList self, at::TensorList out);
+TORCH_API void _foreach_sin_(at::TensorList self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_sub_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_sub_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c9b1bfcd1f037556aca59dd99dd8f7b236a859b2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_sub_ops.h
@@ -0,0 +1,116 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_sub_Scalar {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Scalar & scalar);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_sub__Scalar {
+  using schema = void (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_sub_List {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+};
+
+struct TORCH_API _foreach_sub__List {
+  using schema = void (at::TensorList, at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()")
+  static void call(at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha);
+};
+
+struct TORCH_API _foreach_sub_ScalarList {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_sub__ScalarList {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_sub_Scalar_out {
+  using schema = void (at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+};
+
+struct TORCH_API _foreach_sub_List_out {
+  using schema = void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out);
+};
+
+struct TORCH_API _foreach_sub_ScalarList_out {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sub")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_indices.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_indices.h
new file mode 100644
index 0000000000000000000000000000000000000000..db13fccd8880c914c43cb2f5d5af96f27db06e4b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_indices.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_indices_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_is_zerotensor.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_is_zerotensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..0adf7e10b446bbad3b122fdf0618483f8ae422b5
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_is_zerotensor.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_is_zerotensor_ops.h>
+
+namespace at {
+
+
+// aten::_is_zerotensor(Tensor self) -> bool
+inline bool __dispatch__is_zerotensor(const at::Tensor & self) {
+    return at::_ops::_is_zerotensor::call(self);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_neg_view_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_neg_view_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..37530952213f8af0b831467f6a5638b54e03bdc3
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_neg_view_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _neg_view(const at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..71bc8d296f094eee21d0438ec0dc3dfb2ed5da4c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, const ::std::optional<at::Tensor> & dropout_mask={}, ::std::optional<double> scale=::std::nullopt, bool enable_gqa=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_for_mps_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_for_mps_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9d2e5fd5be542077158871f57ae9c937e4f7895
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_for_mps_native.h
@@ -0,0 +1,20 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mask_projection.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mask_projection.h
new file mode 100644
index 0000000000000000000000000000000000000000..1adfcea9f67281e76f4d259bcbf6e187314a17f6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mask_projection.h
@@ -0,0 +1,34 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sparse_mask_projection_ops.h>
+
+namespace at {
+
+
+// aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_mask_projection_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches=false) {
+    return at::_ops::_sparse_mask_projection_out::call(self, mask, accumulate_matches, out);
+}
+// aten::_sparse_mask_projection.out(Tensor self, Tensor mask, bool accumulate_matches=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_mask_projection_outf(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches, at::Tensor & out) {
+    return at::_ops::_sparse_mask_projection_out::call(self, mask, accumulate_matches, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_semi_structured_tile.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_semi_structured_tile.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba331ca7fe3f35181825d75fd3b2fed31ff63659
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_semi_structured_tile.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sparse_semi_structured_tile_ops.h>
+
+namespace at {
+
+
+// aten::_sparse_semi_structured_tile(Tensor input, str algorithm="", bool use_cutlass=True) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _sparse_semi_structured_tile(const at::Tensor & input, c10::string_view algorithm="", bool use_cutlass=true) {
+    return at::_ops::_sparse_semi_structured_tile::call(input, algorithm, use_cutlass);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_optional_intlist_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_optional_intlist_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ea10a97ea04ad177b2ce22fc4d673b51ac1d7f40
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_optional_intlist_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _test_optional_intlist_out(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out);
+TORCH_API at::Tensor _test_optional_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addmv_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addmv_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ba6e278fdac4eb9bf93ce8d4478d420b2b7f923
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addmv_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_addmv : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha);
+};
+
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/arange.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/arange.h
new file mode 100644
index 0000000000000000000000000000000000000000..1bee7b7bb4a41d805be1d17734c9e7f9a31986cf
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/arange.h
@@ -0,0 +1,70 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/arange_ops.h>
+
+namespace at {
+
+
+// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor arange(const at::Scalar & end, at::TensorOptions options={}) {
+    return at::_ops::arange::call(end, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor arange(const at::Scalar & end, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::arange::call(end, dtype, layout, device, pin_memory);
+}
+
+// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options={}) {
+    return at::_ops::arange_start::call(start, end, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::arange_start::call(start, end, dtype, layout, device, pin_memory);
+}
+
+// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options={}) {
+    return at::_ops::arange_start_step::call(start, end, step, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::arange_start_step::call(start, end, step, dtype, layout, device, pin_memory);
+}
+
+// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arange_out(at::Tensor & out, const at::Scalar & end) {
+    return at::_ops::arange_out::call(end, out);
+}
+// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arange_outf(const at::Scalar & end, at::Tensor & out) {
+    return at::_ops::arange_out::call(end, out);
+}
+
+// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arange_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step) {
+    return at::_ops::arange_start_out::call(start, end, step, out);
+}
+// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arange_outf(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
+    return at::_ops::arange_start_out::call(start, end, step, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/as_strided_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/as_strided_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..e189a312749e8eb219a7b148d9f79406f815e3ba
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/as_strided_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API as_strided {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, ::std::optional<c10::SymInt>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::as_strided")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset);
+};
+
+struct TORCH_API as_strided_ {
+  using schema = const at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, ::std::optional<c10::SymInt>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::as_strided_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)")
+  static const at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset);
+  static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asin_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asin_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a20c148477977a5f2f9d3bf12eba07535094063
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asin_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/asin_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_asin_out : public at::meta::structured_asin {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor asin_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & asin_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & asin_sparse_(at::Tensor & self);
+TORCH_API at::Tensor asin_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & asin_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & asin_sparse_csr_(at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/atanh.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/atanh.h
new file mode 100644
index 0000000000000000000000000000000000000000..5254002a82b6a8192e924115d3bb4f645ff99f43
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/atanh.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/atanh_ops.h>
+
+namespace at {
+
+
+// aten::atanh(Tensor self) -> Tensor
+inline at::Tensor atanh(const at::Tensor & self) {
+    return at::_ops::atanh::call(self);
+}
+
+// aten::atanh_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & atanh_(at::Tensor & self) {
+    return at::_ops::atanh_::call(self);
+}
+
+// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::atanh_out::call(self, out);
+}
+// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::atanh_out::call(self, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_not_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_not_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..da1e3402c258cb5c34b47c00c6d1c4c389a3bcd8
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_not_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_bitwise_not : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7457b61df7061093fc3889ad4a2045a2532ba370
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor clamp(const at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max=::std::nullopt);
+TORCH_API at::Tensor & clamp_(at::Tensor & self, const ::std::optional<at::Scalar> & min, const ::std::optional<at::Scalar> & max=::std::nullopt);
+TORCH_API at::Tensor clamp(const at::Tensor & self, const ::std::optional<at::Tensor> & min={}, const ::std::optional<at::Tensor> & max={});
+TORCH_API at::Tensor & clamp_(at::Tensor & self, const ::std::optional<at::Tensor> & min={}, const ::std::optional<at::Tensor> & max={});
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/coalesce_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/coalesce_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..626df101f74f73cdf17dd1eddeaf074490bdf1f2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/coalesce_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API coalesce {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::coalesce")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "coalesce(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/corrcoef_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/corrcoef_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..20b6127377beb61089a37539b23e1644903ff85f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/corrcoef_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor corrcoef(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumprod_backward_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumprod_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..702bfc6b5cf7e44fdbae0cbcb1520f0f0dc15f32
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumprod_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor cumprod_backward(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/exp_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/exp_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..caba613590a82bfe94f46e78313151495f4310b1
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/exp_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor exp(const at::Tensor & self); +TORCH_API at::Tensor & exp_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fractional_max_pool2d_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fractional_max_pool2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e58635d893b27a96dd5cdbacb712f2175ead1f2e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fractional_max_pool2d_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/frobenius_norm_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/frobenius_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..75a9fed675d8c191464e5d3d0ccf742309e189e8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/frobenius_norm_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
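// A sketch of calling the pooling op declared above through the public,
// device-dispatched wrapper. Assumes a linked libtorch; fmp_demo and the shapes are
// illustrative. random_samples holds one (h, w) pair in [0, 1) per (N, C) plane.
#include <ATen/ATen.h>

void fmp_demo() {
  at::Tensor x = at::randn({1, 1, 8, 8});
  at::Tensor samples = at::rand({1, 1, 2});
  auto [out, indices] = at::fractional_max_pool2d(x, {2, 2}, {4, 4}, samples);
}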
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API frobenius_norm_dim { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::frobenius_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim); +}; + +struct TORCH_API frobenius_norm_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::frobenius_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..2adb1b7959e5e9097e388c0ea27506b7ffd47677 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hardsigmoid : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1abc1ed59555bb1223fa9b4fa14beb50b2da6cc0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
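// A sketch of the two equivalent entry points for the frobenius_norm.dim schema above:
// the public wrapper and the generated struct's static call(). Assumes a linked
// libtorch; frob_demo is an illustrative name.
#include <ATen/ATen.h>

void frob_demo(const at::Tensor & m) {
  at::Tensor n1 = at::frobenius_norm(m, {0, 1}, /*keepdim=*/false);
  at::Tensor n2 = at::_ops::frobenius_norm_dim::call(m, {0, 1}, false);
}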
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_add.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_add.h new file mode 100644 index 0000000000000000000000000000000000000000..081e321ce6619fdb6ccc7758e0f58baa0f25b07c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_add.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_out::call(self, dim, index, source, alpha, out); +} +// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::index_add_out::call(self, dim, index, source, alpha, out); +} + +// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor +inline at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add::call(self, dim, index, source, alpha); +} + +// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor +inline at::Tensor index_add(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_dimname::call(self, dim, index, source, alpha); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isclose.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isclose.h new file mode 100644 index 0000000000000000000000000000000000000000..452c37ccb7338c42e5b5c218fa54bec5c1dc33ac --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isclose.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor +inline at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) { + return at::_ops::isclose::call(self, other, rtol, atol, equal_nan); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h 
b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..06a14b24e644ceea3490a402ece19afeb849de0a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_lu_factor_ex_out : public at::meta::structured_linalg_lu_factor_ex { +void impl(const at::Tensor & A, bool pivot, bool check_errors, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & info); +}; +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9f7670c35a1a443d727d9e5e8d51099e124d48a6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_matrix_exp_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
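// A sketch of the structured LU factorization above via its public wrapper. Assumes a
// linked libtorch; lu_demo and the matrix size are illustrative. info is nonzero where
// factorization failed; check_errors=false defers that check to the caller.
#include <ATen/ATen.h>

void lu_demo() {
  at::Tensor A = at::randn({3, 3});
  auto [LU, pivots, info] = at::linalg_lu_factor_ex(A, /*pivot=*/true, /*check_errors=*/false);
}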
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & linalg_matrix_exp_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & linalg_matrix_exp_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log10_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log10_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..c82bf9bbeda792a63618fa8dfe65e1c171e2a22a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log10_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_log10 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..36fe5692331a07dbb52c03b07fb4a1b1e94c017b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_sigmoid_forward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward_cpu(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out_cpu(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward_cuda(const at::Tensor & self); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out_cuda(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logit_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logit_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2d79b57852006186ddb9574f9c69fc985fe9d903 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logit_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logit(const at::Tensor & self, ::std::optional<double> eps=::std::nullopt); +TORCH_API at::Tensor & logit_out(const at::Tensor & self, ::std::optional<double> eps, at::Tensor & out); +TORCH_API at::Tensor & logit_(at::Tensor & self, ::std::optional<double> eps=::std::nullopt); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lstm_cell.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lstm_cell.h new file mode 100644 index 0000000000000000000000000000000000000000..52ca585fb7c1ba87e74572d4d14f8f107aab4979 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lstm_cell.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include 
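// A sketch of the logit kernel declared above, with and without the optional eps
// clamp. Assumes a linked libtorch; logit_demo is an illustrative name.
#include <ATen/ATen.h>

void logit_demo() {
  at::Tensor p  = at::rand({4});
  at::Tensor y  = at::logit(p);        // log(p / (1 - p)), no clamping
  at::Tensor ys = at::logit(p, 1e-6);  // inputs clamped to [eps, 1 - eps] first
}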
+#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor) +inline ::std::tuple lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional & b_ih={}, const ::std::optional & b_hh={}) { + return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/masked_select_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/masked_select_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..12164d10c8b54dd84e559f8c287d9189a2c3368a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/masked_select_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor +inline at::Tensor masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) { + return at::_ops::masked_select_backward::call(grad, input, mask); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/median_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/median_native.h new file mode 100644 index 0000000000000000000000000000000000000000..42283961e1f402853194c51b0d118f39bc69fae0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/median_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & median_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor median_cpu(const at::Tensor & self); +TORCH_API at::Tensor median_cuda(const at::Tensor & self); +TORCH_API ::std::tuple median(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple median_out_cpu(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple median_out_cuda(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple median(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple median_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c3876bde08fce1536a4daec0d73f90cdb9e83ad5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple 
mps_convolution_transpose_backward_out_symint(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_batch_norm_backward_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_batch_norm_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d5106188afa5820e538f9f1849ffa33bc6753800 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_batch_norm_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple native_batch_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_invstd, bool train, double eps, ::std::array output_mask); +TORCH_API ::std::tuple native_batch_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_invstd, bool train, double eps, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_dropout_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_dropout_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f27da40834595828a8af463605a8f7fe3b54c984 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_dropout_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple native_dropout_out(const at::Tensor & input, double p, ::std::optional train, at::Tensor & out0, at::Tensor & out1); +TORCH_API ::std::tuple native_dropout_cpu(const at::Tensor & input, double p, ::std::optional train); +TORCH_API ::std::tuple native_dropout_cuda(const at::Tensor & input, double p, ::std::optional train); +TORCH_API ::std::tuple native_dropout_nested(const at::Tensor & input, double p, ::std::optional train); +} // namespace native +} // namespace at diff --git 
a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_layer_norm_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_layer_norm_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..db9fb8ea53326e8b899b5744eb2df33aecb2dd9b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_layer_norm_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional & weight, const ::std::optional & bias, double eps); +TORCH_API ::std::tuple native_layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional & weight, const ::std::optional & bias, double eps); +TORCH_API ::std::tuple native_layer_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional & weight, const ::std::optional & bias, double eps); +TORCH_API ::std::tuple native_layer_norm_outf(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional & weight, const ::std::optional & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +TORCH_API ::std::tuple native_layer_norm_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional & weight, const ::std::optional & bias, double eps); +TORCH_API ::std::tuple native_layer_norm_symint_outf(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional & weight, const ::std::optional & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nextafter_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nextafter_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b71d4c660294ad1a75db1c1d296b5e235b78f4f4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nextafter_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_nextafter_out : public at::meta::structured_nextafter { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss2d_forward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss2d_forward_native.h new file mode 100644 index 
0000000000000000000000000000000000000000..6a6da3a819f0718b514568576d414d26eaef1636 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss2d_forward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple nll_loss2d_forward_cpu(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index); +TORCH_API ::std::tuple nll_loss2d_forward_out_cpu(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight); +TORCH_API ::std::tuple nll_loss2d_forward_cuda(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index); +TORCH_API ::std::tuple nll_loss2d_forward_out_cuda(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/outer_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/outer_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..003f5504199d476d879abb58e55317a9c07493d8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/outer_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor outer(const at::Tensor & self, const at::Tensor & vec2); +TORCH_API at::Tensor & outer_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2); +TORCH_API at::Tensor & outer_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/pow_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/pow_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1b3190e6074895d2a6737d98df5535129dfba886 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/pow_meta_dispatch.h @@ -0,0 +1,33 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
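// A sketch of the outer-product wrappers declared above, including the out= variant
// that writes into a preallocated buffer. Assumes a linked libtorch; outer_demo is an
// illustrative name.
#include <ATen/ATen.h>

void outer_demo() {
  at::Tensor u = at::arange(3, at::kFloat);
  at::Tensor v = at::arange(4, at::kFloat);
  at::Tensor m = at::outer(u, v);     // shape {3, 4}
  at::Tensor buf = at::empty({3, 4});
  at::outer_out(buf, u, v);           // out= variant reuses buf's storage
}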
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor & pow_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out); +TORCH_API at::Tensor & pow_(at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent); +TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent); +TORCH_API at::Tensor & pow_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out); +TORCH_API at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent); +TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent); +TORCH_API at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out); +TORCH_API at::Tensor & pow_(at::Tensor & self, const at::Scalar & exponent); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/qscheme_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/qscheme_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..58ff85e666397529bcb954cc468d3a0076285d78 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/qscheme_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API qscheme { + using schema = at::QScheme (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::qscheme") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "qscheme(Tensor self) -> QScheme") + static at::QScheme call(const at::Tensor & self); + static at::QScheme redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/randint_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/randint_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cd5393fa3de3fc9ccbffc1cfa0ee43dd0ea7c8d7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/randint_ops.h @@ -0,0 +1,105 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
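// A sketch of the three pow overload families declared above (Tensor^Tensor,
// Tensor^Scalar, Scalar^Tensor) plus the in-place method. Assumes a linked libtorch;
// pow_demo is an illustrative name.
#include <ATen/ATen.h>

void pow_demo() {
  at::Tensor x = at::randn({4});
  at::Tensor a = at::pow(x, 2.0);  // Tensor ^ Scalar
  at::Tensor b = at::pow(x, x);    // Tensor ^ Tensor, elementwise
  at::Tensor c = at::pow(2.0, x);  // Scalar base broadcast over x
  x.pow_(3);                       // in-place variant
}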
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API randint { + using schema = at::Tensor (c10::SymInt, c10::SymIntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API randint_generator { + using schema = at::Tensor (c10::SymInt, c10::SymIntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "generator") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API randint_low { + using schema = at::Tensor (c10::SymInt, c10::SymInt, c10::SymIntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor") + static at::Tensor call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API randint_low_generator { + using schema = at::Tensor (c10::SymInt, c10::SymInt, c10::SymIntArrayRef, ::std::optional, ::std::optional, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low_generator") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +}; + +struct TORCH_API randint_out { + using schema = at::Tensor & (c10::SymInt, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out); +}; + +struct TORCH_API randint_generator_out { + using schema = at::Tensor & (c10::SymInt, c10::SymIntArrayRef, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "generator_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional generator, at::Tensor & out); +}; + +struct TORCH_API randint_low_out { + using schema = at::Tensor & (c10::SymInt, c10::SymInt, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::Tensor & out); +}; + +struct TORCH_API randint_low_generator_out { + using schema = at::Tensor & (c10::SymInt, c10::SymInt, c10::SymIntArrayRef, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low_generator_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional generator, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional generator, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/resize_as_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/resize_as_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f02b4f691a18d88c3549fea1584587649d9f4f54 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/resize_as_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor resize_as(const at::Tensor & self, const at::Tensor & the_template, ::std::optional memory_format=::std::nullopt); +TORCH_API const at::Tensor & resize_as_out(const at::Tensor & self, const at::Tensor & the_template, ::std::optional memory_format, const at::Tensor & out); +TORCH_API const at::Tensor & resize_as_(const at::Tensor & self, const at::Tensor & the_template, ::std::optional memory_format=::std::nullopt); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_add_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_add_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6d511814bcd53dadfbd8491ab580916bc7afa558 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_add_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from 
DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor scatter_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src); +TORCH_API at::Tensor & scatter_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src); +TORCH_API at::Tensor & scatter_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out); +TORCH_API at::Tensor & scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b2a34f75cccc3d842759f46f943b0776d639cec0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_cpu_dispatch.h @@ -0,0 +1,38 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
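// A sketch of scatter_add as declared above; duplicate indices accumulate. Assumes a
// linked libtorch; scatter_add_demo is an illustrative name. at::randint defaults to
// int64, which is what the index argument requires.
#include <ATen/ATen.h>

void scatter_add_demo() {
  at::Tensor self  = at::zeros({5});
  at::Tensor index = at::randint(/*low=*/0, /*high=*/5, {3});
  at::Tensor src   = at::ones({3});
  // out[index[i]] += src[i] along dim 0.
  at::Tensor out = at::scatter_add(self, /*dim=*/0, index, src);
}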
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src); +TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src); +TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out); +TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src); +TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce); +TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce); +TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out); +TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce); +TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce); +TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce); +TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out); +TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sgn_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sgn_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..62e336bbddd77a87be3eb84d84730cf9a10b9a9a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sgn_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
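// A sketch of the scatter overload families declared above: Tensor src vs. Scalar
// value, with and without a reduce string. Assumes a linked libtorch; scatter_demo is
// an illustrative name and the arguments are assumed shape-compatible.
#include <ATen/ATen.h>

void scatter_demo(at::Tensor self, at::Tensor index, at::Tensor src) {
  at::Tensor a = at::scatter(self, 0, index, src);              // plain overwrite
  at::Tensor b = at::scatter(self, 0, index, src, "multiply");  // reduce variant
  self.scatter_(0, index, 1.5);                                 // Scalar value, in-place
}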
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor sgn(const at::Tensor & self); +TORCH_API at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sgn_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_dilated2d.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_dilated2d.h new file mode 100644 index 0000000000000000000000000000000000000000..eaa151a25bd3320bacf9e05c6e4d70aa3c187227 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_dilated2d.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor +inline at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation)); +} +namespace symint { + template ::value>> + at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated2d::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation)); + } +} + +// aten::slow_conv_dilated2d(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1) -> Tensor +inline at::Tensor slow_conv_dilated2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation); +} +namespace symint { + template ::value>> + at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation); + } +} + +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); + } +} + +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, c10::fromIntArrayRefSlow(kernel_size), bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), out); + } +} + +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & slow_conv_dilated2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_dilated2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out); + } +} + +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, SymInt[2] kernel_size, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & slow_conv_dilated2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out); +} +namespace symint { + template ::value>> + at::Tensor & slow_conv_dilated2d_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_dilated2d_out::call(self, weight, kernel_size, bias, stride, padding, dilation, out); + } +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0f82841ac075fecce3a371271dca2df908faf47c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor slow_conv_dilated2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e23f1f6bd34915d302da42131f49fb1353ed900f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
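// A sketch of the slow_conv_dilated2d wrapper family above via the IntArrayRef entry
// point. Assumes a linked libtorch; dilated_conv_demo and the shapes are illustrative.
// Passing {} for bias leaves the optional Tensor unset.
#include <ATen/ATen.h>

void dilated_conv_demo() {
  at::Tensor x = at::randn({1, 3, 16, 16});
  at::Tensor w = at::randn({8, 3, 3, 3});
  at::Tensor y = at::slow_conv_dilated2d(x, w, {3, 3}, /*bias=*/{}, {1, 1}, {1, 1}, {2, 2});
}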
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor slow_conv_transpose2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)); +TORCH_API at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out); +TORCH_API at::Tensor & slow_conv_transpose2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)); +TORCH_API at::Tensor & slow_conv_transpose2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3ac1521d3cff2182e4677f1f754097020dab9006 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ac1521d3cff2182e4677f1f754097020dab9006
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i1.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i1.h
new file mode 100644
index 0000000000000000000000000000000000000000..bec209b582698d21fed083ebd3171ab7571b65ec
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i1.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/special_i1_ops.h>
+
+namespace at {
+
+
+// aten::special_i1(Tensor self) -> Tensor
+inline at::Tensor special_i1(const at::Tensor & self) {
+    return at::_ops::special_i1::call(self);
+}
+
+// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_i1_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::special_i1_out::call(self, out);
+}
+// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_i1_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::special_i1_out::call(self, out);
+}
+
+}
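special_i1_out and special_i1_outf above route to the same underlying op; they differ only in whether out comes first (so trailing defaults remain usable) or last (matching the aten schema order). A short sketch, assuming LibTorch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::linspace(0.0, 2.0, 5);
      at::Tensor y = at::special_i1(x);    // functional: allocates the result
      at::Tensor out = at::empty_like(x);
      at::special_i1_out(out, x);          // out-first spelling
      at::special_i1_outf(x, out);         // out-last spelling, same kernel
      return 0;
    }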
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a57c6efda3f9b511e2fdbd26eacc5402c30df13
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_legendre_polynomial_p_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/split_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/split_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..18c8f6dacc4fa3cbb397ba8ce343d4d9623ab8ea
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/split_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> split(const at::Tensor & self, int64_t split_size, int64_t dim=0);
+TORCH_API ::std::vector<at::Tensor> split_symint(const at::Tensor & self, c10::SymInt split_size, int64_t dim=0);
+
+} // namespace compositeexplicitautograd
+} // namespace at
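split returns a vector of views over the input rather than copies, and the _symint variant exists so split sizes can stay symbolic under tracing. A quick sketch of the concrete overload, assuming LibTorch:

    #include <ATen/ATen.h>
    #include <vector>

    int main() {
      at::Tensor t = at::arange(10);
      // Chunks of 4 along dim 0: sizes 4, 4, and a final 2.
      std::vector<at::Tensor> parts = at::split(t, /*split_size=*/4, /*dim=*/0);
      parts[0].add_(1);  // mutates t as well, since split returns views
      return 0;
    }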
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sqrt_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sqrt_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c78f87d09af69fad98ed358c18bb47c61f7a84d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sqrt_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor sqrt(const at::Tensor & self);
+TORCH_API at::Tensor & sqrt_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..295e9658d8ffe3b812227696874143f853593190
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sub_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+
+} // namespace cuda
+} // namespace at
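The alpha scalar in sub scales the second operand before subtraction, i.e. the op computes self - alpha * other. A small sketch, assuming LibTorch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor a = at::ones({3});
      at::Tensor b = at::full({3}, 2.0);
      at::Tensor c = at::sub(a, b, /*alpha=*/0.5);  // 1 - 0.5 * 2 -> all zeros
      a.sub_(b);  // in-place counterpart, declared above as sub_
      return 0;
    }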
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/t_copy_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/t_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..49d4f876de01faf504d80cb6137b7429830c553b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/t_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & t_copy_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & t_copy_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tanh_backward_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tanh_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..8239bf39dd71281ecab9407282d89696377bfc68
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tanh_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_tanh_backward : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & grad_output, const at::Tensor & output);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae45fb4d72460b100e16be3a71c8a0de830bcd13
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor to_sparse(const at::Tensor & self, int64_t sparse_dim);
+TORCH_API at::Tensor to_sparse(const at::Tensor & self, ::std::optional<at::Layout> layout=::std::nullopt, at::OptionalIntArrayRef blocksize=::std::nullopt, ::std::optional<int64_t> dense_dim=::std::nullopt);
+} // namespace native
+} // namespace at
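The two to_sparse overloads above are the legacy COO form (explicit sparse_dim) and the layout-driven form added alongside the CSR/CSC/BSR layouts. A sketch of both, assuming a PyTorch 2.x LibTorch where the layout overload is exposed on at::Tensor:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor dense = at::eye(3);
      at::Tensor coo = dense.to_sparse(/*sparse_dim=*/2);  // COO with 2 sparse dims
      at::Tensor csr = dense.to_sparse(at::kSparseCsr);    // layout-selected conversion
      return 0;
    }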
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trace.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trace.h
new file mode 100644
index 0000000000000000000000000000000000000000..07ba52f86ead41ea07032dc0968b1cf649e30f36
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trace.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/trace_ops.h>
+
+namespace at {
+
+
+// aten::trace(Tensor self) -> Tensor
+inline at::Tensor trace(const at::Tensor & self) {
+    return at::_ops::trace::call(self);
+}
+
+// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & trace_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::trace_out::call(self, out);
+}
+// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & trace_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::trace_out::call(self, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trace_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trace_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7ca8fd616d3310fc59bb75cb6e39a14252390bf4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trace_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor trace(const at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
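trace sums the main diagonal of a 2-D tensor into a 0-dim result. A minimal sketch of the functional and out forms declared above, assuming LibTorch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor m = at::eye(3);
      at::Tensor t = at::trace(m);                  // 0-dim tensor holding 3
      at::Tensor out = at::empty({}, m.options());  // preallocated 0-dim destination
      at::trace_out(out, m);                        // out-variant writes into `out`
      return 0;
    }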
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_trilinear3d_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_trilinear3d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f085020b38f330728f9012051b52549f895556e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_trilinear3d_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor upsample_trilinear3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
+TORCH_API at::Tensor & upsample_trilinear3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & upsample_trilinear3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
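upsample_trilinear3d takes an NCDHW input plus either an explicit output_size or the three optional per-axis scale factors defaulted to nullopt above. A sketch of the explicit-size form, assuming LibTorch (shapes are illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::randn({1, 2, 4, 4, 4});  // NCDHW
      at::Tensor y = at::upsample_trilinear3d(x, /*output_size=*/{8, 8, 8},
                                              /*align_corners=*/false);
      // y.sizes() == [1, 2, 8, 8, 8]
      return 0;
    }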