diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_assert_async_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_assert_async_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cfdf4be4fa4dead18edb85e332dc3b67a9c09031
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_assert_async_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _assert_async {
+  using schema = void (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_assert_async")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_assert_async(Tensor self) -> ()")
+  static void call(const at::Tensor & self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API _assert_async_msg {
+  using schema = void (const at::Tensor &, c10::string_view);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_assert_async")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "msg")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_assert_async.msg(Tensor self, str assert_msg) -> ()")
+  static void call(const at::Tensor & self, c10::string_view assert_msg);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view assert_msg);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..3707cf7fdc8adede0719fa9b8ac490a1c8fe10bb
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cholesky_solve_helper {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cholesky_solve_helper")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & A, bool upper);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper);
+};
+
+struct TORCH_API _cholesky_solve_helper_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cholesky_solve_helper")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_coalesced_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_coalesced_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..eddef49ceb921a59f26806a645dbc09e12237dc3
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_coalesced_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & _coalesced_(at::Tensor & self, bool coalesced);
+
+} // namespace meta
+} // namespace at
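The `*_ops.h` headers above only declare the unboxed dispatcher entry points; ordinary code reaches the same operators through the `at::` wrapper functions. A minimal usage sketch (not part of the diff) for `_assert_async`, which raises if the scalar argument is zero:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor ok = at::scalar_tensor(1);  // nonzero scalar, so the assert passes
  at::_assert_async(ok);                 // public wrapper
  at::_ops::_assert_async::call(ok);     // equivalent call via the generated struct
  return 0;
}
```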
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4b8da4688b73398675676179e26de258fddbb84a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalar_kernel_slow(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_clamp_min_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+TORCH_API void foreach_tensor_clamp_min_scalar_kernel_slow_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalar_kernel_cuda(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void foreach_tensor_clamp_min_scalar_kernel_cuda_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_list_kernel_slow(at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_clamp_min_List_out(at::TensorList self, at::TensorList other, at::TensorList out);
+TORCH_API void foreach_tensor_clamp_min_list_kernel_slow_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_list_kernel_cuda(at::TensorList self, at::TensorList other);
+TORCH_API void foreach_tensor_clamp_min_list_kernel_cuda_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalarlist_kernel_slow(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_clamp_min_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+TORCH_API void foreach_tensor_clamp_min_scalarlist_kernel_slow_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalarlist_kernel_cuda(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void foreach_tensor_clamp_min_scalarlist_kernel_cuda_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_lerp_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_lerp_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c51274a7ae94d01774f3997df525d8c9a5399f41
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_lerp_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_lerp_List {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::TensorList tensors1, at::TensorList weights);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights);
+};
+
+struct TORCH_API _foreach_lerp__List {
+  using schema = void (at::TensorList, at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()")
+  static void call(at::TensorList self, at::TensorList tensors1, at::TensorList weights);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights);
+};
+
+struct TORCH_API _foreach_lerp_Scalar {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
+};
+
+struct TORCH_API _foreach_lerp__Scalar {
+  using schema = void (at::TensorList, at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()")
+  static void call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
+};
+
+struct TORCH_API _foreach_lerp_List_out {
+  using schema = void (at::TensorList, at::TensorList, at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out);
+};
+
+struct TORCH_API _foreach_lerp_Scalar_out {
+  using schema = void (at::TensorList, at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_lerp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_functional_assert_async_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_functional_assert_async_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2046afc81a532dc0345cee165411ff7a5000181c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_functional_assert_async_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _functional_assert_async_msg_cpu(const at::Tensor & self, c10::string_view assert_msg, const at::Tensor & dep_token);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_tensors.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_tensors.h
new file mode 100644
index 0000000000000000000000000000000000000000..29292a8a407b23a3c51d95039f2d0a3d2e039f6d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_histogramdd_from_bin_tensors.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/_histogramdd_from_bin_tensors_ops.h>
+
+namespace at {
+
+
+// aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor
+inline at::Tensor _histogramdd_from_bin_tensors(const at::Tensor & self, at::TensorList bins, const ::std::optional<at::Tensor> & weight={}, bool density=false) {
+    return at::_ops::_histogramdd_from_bin_tensors::call(self, bins, weight, density);
+}
+
+// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _histogramdd_from_bin_tensors_out(at::Tensor & out, const at::Tensor & self, at::TensorList bins, const ::std::optional<at::Tensor> & weight={}, bool density=false) {
+    return at::_ops::_histogramdd_from_bin_tensors_out::call(self, bins, weight, density, out);
+}
+// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _histogramdd_from_bin_tensors_outf(const at::Tensor & self, at::TensorList bins, const ::std::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
+    return at::_ops::_histogramdd_from_bin_tensors_out::call(self, bins, weight, density, out);
+}
+
+}
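The `_foreach_*` family declared in the hunks above operates on whole tensor lists at once, returning one result tensor per input. A small sketch (not part of the diff) of the `Scalar` overload of `_foreach_lerp` from the header above:

```cpp
#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<at::Tensor> xs{at::zeros({2}), at::zeros({2})};
  std::vector<at::Tensor> ys{at::ones({2}), at::ones({2})};
  // Functional variant: returns a new tensor per list element.
  std::vector<at::Tensor> out = at::_foreach_lerp(xs, ys, /*weight=*/0.5);
  // In-place variant (note the trailing underscore): mutates xs.
  at::_foreach_lerp_(xs, ys, 0.5);
  return 0;
}
```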
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_make_dual_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_make_dual_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..55754cf1bac3245a075d856af87828a72d40b98e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_make_dual_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _make_dual {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_make_dual")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & primal, const at::Tensor & tangent, int64_t level);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..116f87ce330ff51db39aa4c42f3778cc4f4c40f3
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_backward.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/_scaled_dot_product_cudnn_attention_backward_ops.h>
+
+namespace at {
+
+
+// aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor attn_bias, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, *, float? scale=None) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, const at::Tensor & attn_bias, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, ::std::optional<double> scale=::std::nullopt) {
+    return at::_ops::_scaled_dot_product_cudnn_attention_backward::call(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, const at::Tensor & attn_bias, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, ::std::optional<double> scale=::std::nullopt) {
+    return at::_ops::_scaled_dot_product_cudnn_attention_backward::call(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale);
+  }
+}
+
+// aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, Tensor attn_bias, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, *, float? scale=None) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward_symint(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, const at::Tensor & attn_bias, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, ::std::optional<double> scale=::std::nullopt) {
+    return at::_ops::_scaled_dot_product_cudnn_attention_backward::call(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, const at::Tensor & attn_bias, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, ::std::optional<double> scale=::std::nullopt) {
+    return at::_ops::_scaled_dot_product_cudnn_attention_backward::call(grad_out, query, key, value, out, logsumexp, philox_seed, philox_offset, attn_bias, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, scale);
+  }
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sobol_engine_draw_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sobol_engine_draw_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c9e03ba718199a5ee9fd6c1c1a87dc241d62481b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sobol_engine_draw_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sobol_engine_draw {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, const at::Tensor &, int64_t, int64_t, ::std::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sobol_engine_draw")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, ::std::optional<at::ScalarType> dtype);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, ::std::optional<at::ScalarType> dtype);
+};
+
+}} // namespace at::_ops
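The `name`, `overload_name`, and `schema_str` members in these generated structs mirror what is registered with the dispatcher, so the same operator can also be looked up dynamically. A sketch (not part of the diff) that prints the registered schema for the operator declared just above:

```cpp
#include <ATen/core/dispatch/Dispatcher.h>
#include <iostream>

int main() {
  // Look up by the (name, overload_name) pair from the generated struct.
  auto op = c10::Dispatcher::singleton()
                .findSchemaOrThrow("aten::_sobol_engine_draw", "");
  std::cout << op.schema() << "\n";  // prints the registered schema string
  return 0;
}
```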
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sobol_engine_ff_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sobol_engine_ff_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..44304158c3bb87821b17ecf28862031942be9944
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sobol_engine_ff_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _sobol_engine_ff_(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_unsafe_masked_index_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_unsafe_masked_index_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..33c75e0776adff035c47ffc4b160ee78e2874119
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_unsafe_masked_index_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _unsafe_masked_index {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unsafe_masked_index")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unsafe_masked_index(Tensor self, Tensor mask, Tensor?[] indices, Scalar fill) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Scalar & fill);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const c10::List<::std::optional<at::Tensor>> & indices, const at::Scalar & fill);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3689df0585101f6a280b64f9bab55b3b29d3d257
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _upsample_bilinear2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors);
+TORCH_API at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..76566a00d1e5a345b2e8fdb2d2642c142802bdfc
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_version_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_version_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..17500bb111ddbb2594df4e585693f25afea8ed5e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_version_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t _version(const at::Tensor & self);
+} // namespace native
+} // namespace at
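The `compositeimplicitautograd` dispatch header above exposes the overload that takes either an explicit `output_size` or `scale_factors`. A minimal usage sketch (not part of the diff), going through the public `at::` wrapper:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor img = at::randn({1, 3, 8, 8});  // NCHW input
  // Provide output_size explicitly and leave scale_factors empty;
  // exactly one of the two should be given.
  at::Tensor up = at::_upsample_bilinear2d_aa(img, {16, 16},
                                              /*align_corners=*/false,
                                              ::std::nullopt);
  return 0;
}
```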
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/abs_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/abs_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0257e11bd0ce2b1d4712d6cd0c4b018110457583
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/abs_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/absolute_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/absolute_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d310c4ceef4b812fd9670bb0d570fb3c2003c575
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/absolute_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API absolute {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::absolute")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "absolute(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API absolute_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::absolute_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "absolute_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API absolute_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::absolute")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5126018e0a0d21e11f418cde969253e7b66ce5c7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcmul.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcmul.h
new file mode 100644
index 0000000000000000000000000000000000000000..da7ea2feb4a351433132ff8d876453b9cbde7761
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcmul.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/addcmul_ops.h>
+
+namespace at {
+
+
+// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
+    return at::_ops::addcmul_out::call(self, tensor1, tensor2, value, out);
+}
+// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
+    return at::_ops::addcmul_out::call(self, tensor1, tensor2, value, out);
+}
+
+// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
+inline at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
+    return at::_ops::addcmul::call(self, tensor1, tensor2, value);
+}
+
+}
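The `addcmul.h` hunk above shows the two generated spellings of every `out=` overload: `*_out` takes the output first and keeps the schema defaults, while `*_outf` follows the schema argument order with no defaults. Both forward to the same `_ops` entry point. A short sketch (not part of the diff):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::randn({4});
  at::Tensor out = at::empty_like(a);
  at::addcmul_out(out, a, a, a, /*value=*/2.0);   // out-first, defaults allowed
  at::addcmul_outf(a, a, a, /*value=*/2.0, out);  // schema order, all args explicit
  return 0;
}
```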
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/amin_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/amin_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4fa55718355377e7d15a2f762de3c9b6968d9cf7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/amin_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor amin(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
+TORCH_API at::Tensor & amin_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
+TORCH_API at::Tensor & amin_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/avg_pool3d_backward_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/avg_pool3d_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..24dbeb99a18eb154937a86933d48a6d2fcddee39
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/avg_pool3d_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_avg_pool3d_backward : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override);
+};
+
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_right_shift_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_right_shift_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..16af18f329b0f0aee61f8992b703171dc0a8c126
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_right_shift_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,29 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & bitwise_right_shift_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & bitwise_right_shift_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor bitwise_right_shift(const at::Scalar & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_right_shift_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_right_shift_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/broadcast_tensors_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/broadcast_tensors_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b1cef3bfb94db8d0cf24c245b3f99348a968e89
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/broadcast_tensors_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> broadcast_tensors(at::TensorList tensors);
+
+} // namespace compositeimplicitautograd
+} // namespace at
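`broadcast_tensors` above is registered under `CompositeImplicitAutograd`: it is implemented in terms of other differentiable ops, so it needs no dedicated backward kernel. A minimal usage sketch (not part of the diff):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::ones({3, 1});
  at::Tensor b = at::ones({1, 4});
  // Both results are 3x4 views expanded to the common broadcast shape.
  std::vector<at::Tensor> expanded = at::broadcast_tensors({a, b});
  return 0;
}
```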
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cat_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cat_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b1b4e985f364fda56393bf234c87a524e12864a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cat_meta.h
@@ -0,0 +1,114 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_cat : public at::impl::MetaBase {
+
+    template <bool DIM = false, bool VALID = false, bool ALL_CONTIGUOUS = false, bool ALL_SAME_DTYPE = false, bool ALL_SAME_SIZES_AND_STRIDE = false, bool MEMORY_FORMAT = false>
+    struct TORCH_API precompute_out {
+
+        precompute_out<true, VALID, ALL_CONTIGUOUS, ALL_SAME_DTYPE, ALL_SAME_SIZES_AND_STRIDE, MEMORY_FORMAT> set_dim(int64_t value) {
+            static_assert(DIM == false, "dim already set");
+            precompute_out<true, VALID, ALL_CONTIGUOUS, ALL_SAME_DTYPE, ALL_SAME_SIZES_AND_STRIDE, MEMORY_FORMAT> ret;
+ret.dim = value;
+ret.valid = this->valid;
+ret.all_contiguous = this->all_contiguous;
+ret.all_same_dtype = this->all_same_dtype;
+ret.all_same_sizes_and_stride = this->all_same_sizes_and_stride;
+ret.memory_format = this->memory_format;
+return ret;
+        }
+
+
+        precompute_out<DIM, true, ALL_CONTIGUOUS, ALL_SAME_DTYPE, ALL_SAME_SIZES_AND_STRIDE, MEMORY_FORMAT> set_valid(int64_t value) {
+            static_assert(VALID == false, "valid already set");
+            precompute_out<DIM, true, ALL_CONTIGUOUS, ALL_SAME_DTYPE, ALL_SAME_SIZES_AND_STRIDE, MEMORY_FORMAT> ret;
+ret.dim = this->dim;
+ret.valid = value;
+ret.all_contiguous = this->all_contiguous;
+ret.all_same_dtype = this->all_same_dtype;
+ret.all_same_sizes_and_stride = this->all_same_sizes_and_stride;
+ret.memory_format = this->memory_format;
+return ret;
+        }
+
+
+        precompute_out<DIM, VALID, true, ALL_SAME_DTYPE, ALL_SAME_SIZES_AND_STRIDE, MEMORY_FORMAT> set_all_contiguous(bool value) {
+            static_assert(ALL_CONTIGUOUS == false, "all_contiguous already set");
+            precompute_out<DIM, VALID, true, ALL_SAME_DTYPE, ALL_SAME_SIZES_AND_STRIDE, MEMORY_FORMAT> ret;
+ret.dim = this->dim;
+ret.valid = this->valid;
+ret.all_contiguous = value;
+ret.all_same_dtype = this->all_same_dtype;
+ret.all_same_sizes_and_stride = this->all_same_sizes_and_stride;
+ret.memory_format = this->memory_format;
+return ret;
+        }
+
+
+        precompute_out<DIM, VALID, ALL_CONTIGUOUS, true, ALL_SAME_SIZES_AND_STRIDE, MEMORY_FORMAT> set_all_same_dtype(bool value) {
+            static_assert(ALL_SAME_DTYPE == false, "all_same_dtype already set");
+            precompute_out<DIM, VALID, ALL_CONTIGUOUS, true, ALL_SAME_SIZES_AND_STRIDE, MEMORY_FORMAT> ret;
+ret.dim = this->dim;
+ret.valid = this->valid;
+ret.all_contiguous = this->all_contiguous;
+ret.all_same_dtype = value;
+ret.all_same_sizes_and_stride = this->all_same_sizes_and_stride;
+ret.memory_format = this->memory_format;
+return ret;
+        }
+
+
+        precompute_out<DIM, VALID, ALL_CONTIGUOUS, ALL_SAME_DTYPE, true, MEMORY_FORMAT> set_all_same_sizes_and_stride(bool value) {
+            static_assert(ALL_SAME_SIZES_AND_STRIDE == false, "all_same_sizes_and_stride already set");
+            precompute_out<DIM, VALID, ALL_CONTIGUOUS, ALL_SAME_DTYPE, true, MEMORY_FORMAT> ret;
+ret.dim = this->dim;
+ret.valid = this->valid;
+ret.all_contiguous = this->all_contiguous;
+ret.all_same_dtype = this->all_same_dtype;
+ret.all_same_sizes_and_stride = value;
+ret.memory_format = this->memory_format;
+return ret;
+        }
+
+
+        precompute_out<DIM, VALID, ALL_CONTIGUOUS, ALL_SAME_DTYPE, ALL_SAME_SIZES_AND_STRIDE, true> set_memory_format(at::MemoryFormat value) {
+            static_assert(MEMORY_FORMAT == false, "memory_format already set");
+            precompute_out<DIM, VALID, ALL_CONTIGUOUS, ALL_SAME_DTYPE, ALL_SAME_SIZES_AND_STRIDE, true> ret;
+ret.dim = this->dim;
+ret.valid = this->valid;
+ret.all_contiguous = this->all_contiguous;
+ret.all_same_dtype = this->all_same_dtype;
+ret.all_same_sizes_and_stride = this->all_same_sizes_and_stride;
+ret.memory_format = value;
+return ret;
+        }
+
+        int64_t dim;
+int64_t valid;
+bool all_contiguous;
+bool all_same_dtype;
+bool all_same_sizes_and_stride;
+at::MemoryFormat memory_format;
+    };
+    using meta_return_ty = precompute_out <true, true, true, true, true, true>;
+    meta_return_ty meta(const at::ITensorListRef & tensors, int64_t dim);
+};
+
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/clamp_max_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/clamp_max_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f4e2e606f28f98b82b96985f1879a663c0a7f208
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/clamp_max_cpu_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor clamp_max(const at::Tensor & self, const at::Scalar & max);
+TORCH_API at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & max);
+TORCH_API at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Scalar & max, at::Tensor & out);
+TORCH_API at::Tensor & clamp_max_(at::Tensor & self, const at::Scalar & max);
+TORCH_API at::Tensor clamp_max(const at::Tensor & self, const at::Tensor & max);
+TORCH_API at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & max);
+TORCH_API at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Tensor & max, at::Tensor & out);
+TORCH_API at::Tensor & clamp_max_(at::Tensor & self, const at::Tensor & max);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/complex_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/complex_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4709deada6f71bc5fe1faef8164f66ff11e849b5
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/complex_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor complex(const at::Tensor & real, const at::Tensor & imag);
+TORCH_API at::Tensor & complex_out(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_tbc_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_tbc_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..91e7af05536b4f7c764d9edabc36ce768d69b0ac
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_tbc_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API conv_tbc_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conv_tbc_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad);
+};
+
+}} // namespace at::_ops
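The `precompute_out` builder in `cat_meta.h` above uses a compile-time flag per field: each `set_*` consumes the builder and returns a new type with that flag flipped to `true`, so setting a field twice (or reading `meta_return_ty` before all fields are set) fails at compile time. A hedged sketch of the chaining, assuming the header above is available:

```cpp
#include <ATen/ops/cat_meta.h>

// Not a real kernel: just demonstrates the builder's type-state chaining.
void sketch() {
  using B = at::meta::structured_cat::precompute_out<>;  // all flags false
  auto done = B()
      .set_dim(0)
      .set_valid(1)
      .set_all_contiguous(true)
      .set_all_same_dtype(true)
      .set_all_same_sizes_and_stride(false)
      .set_memory_format(at::MemoryFormat::Contiguous);
  // decltype(done) is precompute_out<true,...,true>, i.e. meta_return_ty.
  (void)done;
}
```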
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_transpose3d_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_transpose3d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9e583aeb53191a7cb6f093b033bf397c55e0d1ba
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_transpose3d_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API conv_transpose3d_input {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, c10::SymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conv_transpose3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..28e4c5e78074c36cc1e317110732569edabebf69
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fake_quantize_per_channel_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fbgemm_pack_quantized_matrix_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fbgemm_pack_quantized_matrix_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..85f56452f51c61490623023468636fabfbeee2e3
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fbgemm_pack_quantized_matrix_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fbgemm_pack_quantized_matrix {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fbgemm_pack_quantized_matrix")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fbgemm_pack_quantized_matrix(Tensor input) -> Tensor")
+  static at::Tensor call(const at::Tensor & input);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input);
+};
+
+struct TORCH_API fbgemm_pack_quantized_matrix_KN {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fbgemm_pack_quantized_matrix")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "KN")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, int64_t K, int64_t N);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t K, int64_t N);
+};
+
+}} // namespace at::_ops
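The `conv_transpose3d.input` schema above carries defaults for everything past the weight, and the public C++ wrapper mirrors them. A minimal usage sketch (not part of the diff); note the transposed-convolution weight layout `(in_channels, out_channels/groups, kT, kH, kW)`:

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({1, 4, 5, 5, 5});    // N, C_in, T, H, W
  at::Tensor w = at::randn({4, 8, 3, 3, 3});    // C_in, C_out/groups, kT, kH, kW
  // Defaults mirror the schema: bias=None, stride=1, padding=0,
  // output_padding=0, groups=1, dilation=1.
  at::Tensor y = at::conv_transpose3d(x, w);
  return 0;
}
```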
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API fbgemm_pack_quantized_matrix { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fbgemm_pack_quantized_matrix") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fbgemm_pack_quantized_matrix(Tensor input) -> Tensor") + static at::Tensor call(const at::Tensor & input); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input); +}; + +struct TORCH_API fbgemm_pack_quantized_matrix_KN { + using schema = at::Tensor (const at::Tensor &, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fbgemm_pack_quantized_matrix") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "KN") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor") + static at::Tensor call(const at::Tensor & input, int64_t K, int64_t N); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t K, int64_t N); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a93af795ba6130deb5c016110ec8d823fe028b72 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmod_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmod_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmod_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2b611521f414ccc60a04d757e44befb0575ba61d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmod_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API fmod_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API fmod_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod.Scalar(Tensor self, Scalar other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API fmod__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API fmod_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API fmod_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod.Tensor(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API fmod__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmod_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/group_norm_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/group_norm_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e5722d88243346058ae366b70e7f3aadbf9e8e18 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/group_norm_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor group_norm(const at::Tensor & input, int64_t num_groups, const ::std::optional & weight={}, const ::std::optional & bias={}, double eps=1e-05, bool cudnn_enabled=true); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/heaviside.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/heaviside.h new file mode 100644 index 0000000000000000000000000000000000000000..208478173add0bec5c3fa6f1798655d18550bacc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/heaviside.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) 
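The generated `_ops` structs above are stateless entry points into the dispatcher: `call` performs a full dispatch, while `redispatch` resumes dispatching below the keys already handled in `dispatchKeySet`. A minimal sketch of the two equivalent call paths for `aten::fmod.Tensor`, assuming only a working libtorch build (tensor values are illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor a = at::full({2}, 7.5);
      at::Tensor b = at::full({2}, 2.0);

      // Public API: resolves to the aten::fmod.Tensor overload.
      at::Tensor r1 = at::fmod(a, b);

      // The generated operator struct reaches the same overload explicitly.
      at::Tensor r2 = at::_ops::fmod_Tensor::call(a, b);

      TORCH_CHECK(at::allclose(r1, r2));
    }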
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/heaviside.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/heaviside.h
new file mode 100644
index 0000000000000000000000000000000000000000..208478173add0bec5c3fa6f1798655d18550bacc
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/heaviside.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/heaviside_ops.h>
+
+namespace at {
+
+
+// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values) {
+    return at::_ops::heaviside_out::call(self, values, out);
+}
+// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out) {
+    return at::_ops::heaviside_out::call(self, values, out);
+}
+
+// aten::heaviside(Tensor self, Tensor values) -> Tensor
+inline at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values) {
+    return at::_ops::heaviside::call(self, values);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/less_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/less_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1787471c9f44c97f73334aefb426695c30091f5d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/less_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor less(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & less_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & less_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor less(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & less_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & less_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & less_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log2.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log2.h
new file mode 100644
index 0000000000000000000000000000000000000000..4bac027765775c04aa5f4078d4195c5bcff657cb
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log2.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/log2_ops.h>
+
+namespace at {
+
+
+// aten::log2(Tensor self) -> Tensor
+inline at::Tensor log2(const at::Tensor & self) {
+    return at::_ops::log2::call(self);
+}
+
+// aten::log2_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & log2_(at::Tensor & self) {
+    return at::_ops::log2_::call(self);
+}
+
+// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & log2_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::log2_out::call(self, out);
+}
+// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & log2_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::log2_out::call(self, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_softmax_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_softmax_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2bde848e0961f97436f9ee00c98df7fe261396f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_softmax_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor log_softmax(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & log_softmax_out(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor log_softmax(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6986d398400cfa66a2b6a647e0ce2b32926f65bd
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor logcumsumexp(const at::Tensor & self, int64_t dim);
+TORCH_API at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim);
+TORCH_API at::Tensor & logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9680e33f3f4f259b6cf3f0cd76260b0791c55cbf
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_linear_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_linear_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..9501203b4a1fb3ef45dc26364da119f3ac570e78
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_linear_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & mkldnn_linear_out(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::Tensor & out);
+TORCH_API at::Tensor mkldnn_linear(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias={});
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6e78a879b24da91bbe3449c29046b565b934332
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & mkldnn_max_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+TORCH_API at::Tensor & mkldnn_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
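Each out-variant in the Function.h headers above comes in two spellings: `*_out` takes the output tensor first (the usual C++ convention), while `*_outf` is the "faithful" spelling that keeps the schema's argument order with `out` last. Both forward to the same `_ops` call, as in this sketch (assuming a working libtorch build):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::full({3}, 8.0);
      at::Tensor out = at::empty_like(x);

      at::log2_out(out, x);   // out-first spelling
      at::log2_outf(x, out);  // schema-order ("faithful") spelling

      TORCH_CHECK(at::allclose(out, at::log2(x)));
    }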
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/moveaxis_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/moveaxis_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8c13dfd717d4d4d964bf82986a120237152868b9
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/moveaxis_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API moveaxis_intlist {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::moveaxis")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "intlist")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination);
+};
+
+struct TORCH_API moveaxis_int {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::moveaxis")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, int64_t source, int64_t destination);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t source, int64_t destination);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mse_loss_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mse_loss_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..73f6fcfe4724d4231bea91ae272bac572f79bd33
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mse_loss_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/mse_loss_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_mse_loss_out : public at::meta::structured_mse_loss {
+void impl(const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nan_to_num_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nan_to_num_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b7a6adc1cecc3f5e6f428763cecce6a4dcea00fb
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nan_to_num_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & nan_to_num_out(at::Tensor & out, const at::Tensor & self, ::std::optional<double> nan=::std::nullopt, ::std::optional<double> posinf=::std::nullopt, ::std::optional<double> neginf=::std::nullopt);
+TORCH_API at::Tensor & nan_to_num_outf(const at::Tensor & self, ::std::optional<double> nan, ::std::optional<double> posinf, ::std::optional<double> neginf, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_dropout_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_dropout_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..662c8edf857f5c3c8f986c1a9565af36300da2e6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_dropout_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> native_dropout(const at::Tensor & input, double p, ::std::optional<bool> train);
+
+} // namespace cuda
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_nd.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_nd.h
new file mode 100644
index 0000000000000000000000000000000000000000..f73658c78f69c70bd91aa5066217a1feafff2bd1
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_nd.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/nll_loss_nd_ops.h>
+
+namespace at {
+
+
+// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
+inline at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
+    return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
+    return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
+  }
+}
+
+// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
+inline at::Tensor nll_loss_nd_symint(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
+    return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100) {
+    return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
+  }
+}
+
+}
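`nll_loss_nd` is declared twice above: once with a concrete `int64_t ignore_index` and once as `nll_loss_nd_symint` taking `c10::SymInt`, so symbolic sizes can flow through tracing unchanged; the `at::symint::` templates select between the two by type parameter. A hedged sketch of both call paths (values illustrative, assuming a working libtorch build):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor logits = at::randn({4, 10});
      at::Tensor target = at::zeros({4}, at::kLong);
      at::Tensor logp = at::log_softmax(logits, 1);

      // Concrete-int convention (the common case).
      at::Tensor loss = at::nll_loss_nd(logp, target);

      // Same operator through the SymInt entry point.
      at::Tensor loss2 = at::nll_loss_nd_symint(logp, target, {},
                                                at::Reduction::Mean, c10::SymInt(-100));
      TORCH_CHECK(at::allclose(loss, loss2));
    }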
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/norm_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/norm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6555beea142e33fdae4cc9300d848625e28daa5a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/norm_ops.h
@@ -0,0 +1,149 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API norm_ScalarOpt_dtype {
+  using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Scalar> &, at::ScalarType);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarOpt_dtype")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype);
+};
+
+struct TORCH_API norm_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.Scalar(Tensor self, Scalar p=2) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & p);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p);
+};
+
+struct TORCH_API norm_ScalarOpt_dim_dtype {
+  using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Scalar> &, at::IntArrayRef, bool, at::ScalarType);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarOpt_dim_dtype")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype);
+};
+
+struct TORCH_API norm_ScalarOpt_dim {
+  using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Scalar> &, at::IntArrayRef, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarOpt_dim")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim);
+};
+
+struct TORCH_API norm_dtype_out {
+  using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Scalar> &, at::IntArrayRef, bool, at::ScalarType, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dtype_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out);
+};
+
+struct TORCH_API norm_out {
+  using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Scalar> &, at::IntArrayRef, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+};
+
+struct TORCH_API norm_names_ScalarOpt_dim_dtype {
+  using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Scalar> &, at::DimnameList, bool, at::ScalarType);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_ScalarOpt_dim_dtype")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype);
+};
+
+struct TORCH_API norm_names_ScalarOpt_dim {
+  using schema = at::Tensor (const at::Tensor &, const ::std::optional<at::Scalar> &, at::DimnameList, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_ScalarOpt_dim")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim);
+};
+
+struct TORCH_API norm_names_dtype_out {
+  using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Scalar> &, at::DimnameList, bool, at::ScalarType, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_dtype_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out);
+};
+
+struct TORCH_API norm_names_out {
+  using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Scalar> &, at::DimnameList, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out);
+};
+
+struct TORCH_API norm_ScalarOpt_dtype_out {
+  using schema = at::Tensor & (const at::Tensor &, const ::std::optional<at::Scalar> &, at::ScalarType, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarOpt_dtype_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out);
+};
+
+struct TORCH_API norm_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & p, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out);
+};
+
+}} // namespace at::_ops
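Every `overload_name` in `norm_ops.h` ("Scalar", "ScalarOpt_dim", "dtype_out", ...) materializes as one C++ overload of the same `at::norm` symbol; the struct name is simply the schema name joined with the overload name. A sketch of how three of the overloads above are reached from ordinary calls (assuming a working libtorch build):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor t = at::randn({2, 3});

      at::Tensor all  = at::norm(t, 2);                          // norm.Scalar
      at::Tensor rows = at::norm(t, 2, {1}, /*keepdim=*/true);   // norm.ScalarOpt_dim
      at::Tensor up   = at::norm(t, 2, {1}, false, at::kDouble); // norm.ScalarOpt_dim_dtype
      (void)all; (void)rows; (void)up;
    }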
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/put_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/put_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b94ab7225958dd622e2b2e41bf8e04ff232c0a0e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/put_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & put_(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/qscheme_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/qscheme_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d77aaaf3c87785ed415f3921d6d9b09a62fb525d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/qscheme_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::QScheme qscheme_quant(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/row_indices_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/row_indices_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b1d2310a47ed5d3fedacc15a24e772f0c8835d3
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/row_indices_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API row_indices {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::row_indices")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "row_indices(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f9f4522c159a31a7738da8a98282b609341b0da8
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_cuda_dispatch.h
@@ -0,0 +1,38 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src);
+TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src);
+TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src);
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce);
+TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce);
+TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce);
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce);
+TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce);
+TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce);
+
+} // namespace cuda
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9294ba854b56891a7c8e1361eca34bdc238896ee
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_meta_dispatch.h
@@ -0,0 +1,38 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src);
+TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src);
+TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src);
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce);
+TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce);
+TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce);
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce);
+TORCH_API at::Tensor & scatter_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce);
+TORCH_API at::Tensor & scatter_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sgn_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sgn_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2c392205d64631bdc49537127c701710dc3ff86
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sgn_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_sgn : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..aaa3969d45fe5c17b787504685b8aa96c7ce27db
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor sigmoid(const at::Tensor & self);
+TORCH_API at::Tensor & sigmoid_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & sigmoid_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & sigmoid_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
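The `at::meta::` declarations above (and the `*_meta_dispatch.h` headers generally) expose only the shape- and dtype-inference half of a structured kernel, so they also serve tensors on the Meta device, which carry metadata but no storage. A sketch (assuming a working libtorch build):

    #include <ATen/ATen.h>

    int main() {
      // A meta tensor: sizes and dtype, but no allocated data.
      at::Tensor m = at::empty({128, 256}, at::TensorOptions().device(at::kMeta));
      at::Tensor s = at::sigmoid(m);  // dispatches to the meta kernel

      TORCH_CHECK(s.is_meta());
      TORCH_CHECK(s.sizes() == m.sizes());
    }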
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sin_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sin_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..996e28f921ece8d52101b984686e1b6745694e3b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sin_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor sin(const at::Tensor & self);
+TORCH_API at::Tensor & sin_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sinc_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sinc_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a14a6303a3436d01e3435ac7cd68bd6be957d978
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sinc_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/sinc_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_sinc_out : public at::meta::structured_sinc {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/smm.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/smm.h
new file mode 100644
index 0000000000000000000000000000000000000000..da25c1b6aa154634c800a18d9d947d04155dd48f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/smm.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/smm_ops.h>
+
+namespace at {
+
+
+// aten::smm(Tensor self, Tensor mat2) -> Tensor
+inline at::Tensor smm(const at::Tensor & self, const at::Tensor & mat2) {
+    return at::_ops::smm::call(self, mat2);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3f2c533638704b5a440033631ab75b0cf41fbcb
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/softplus_ops.h>
+
+namespace at {
+
+
+// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & softplus_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20) {
+    return at::_ops::softplus_out::call(self, beta, threshold, out);
+}
+// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & softplus_outf(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) {
+    return at::_ops::softplus_out::call(self, beta, threshold, out);
+}
+
+// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
+inline at::Tensor softplus(const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20) {
+    return at::_ops::softplus::call(self, beta, threshold);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2f6a1ff0d133f5cfbccbe06aeaa910a1654cda4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/softshrink_backward_ops.h>
+
+namespace at {
+
+
+// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & softshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
+    return at::_ops::softshrink_backward_grad_input::call(grad_output, self, lambd, grad_input);
+}
+// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & softshrink_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
+    return at::_ops::softshrink_backward_grad_input::call(grad_output, self, lambd, grad_input);
+}
+
+// aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
+inline at::Tensor softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
+    return at::_ops::softshrink_backward::call(grad_output, self, lambd);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_backward_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..08059cd739556de5621adbd6df5c04a11c290d83
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_backward_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd);
+TORCH_API at::Tensor & softshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd);
+TORCH_API at::Tensor & softshrink_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sparse_csr_tensor_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sparse_csr_tensor_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e623bb1bd1883bbb3a47ff1a30983dc0690d7ae4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sparse_csr_tensor_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+TORCH_API at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+} // namespace native
+} // namespace at
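The two `sparse_csr_tensor` overloads above differ only in whether the dense `size` is passed explicitly or inferred from the indices. A sketch of the explicit-size form through the public factory (values illustrative; assumes a working libtorch build):

    #include <ATen/ATen.h>

    int main() {
      // 2x3 CSR matrix: [[1, 0, 2], [0, 0, 3]]
      at::Tensor crow = at::tensor({0, 2, 3}, at::kLong);  // per-row offsets into col/vals
      at::Tensor col  = at::tensor({0, 2, 2}, at::kLong);  // column index of each nonzero
      at::Tensor vals = at::tensor({1.0, 2.0, 3.0});

      at::Tensor csr = at::sparse_csr_tensor(crow, col, vals, {2, 3},
                                             at::TensorOptions().dtype(at::kDouble));
      TORCH_CHECK(csr.is_sparse_csr());
    }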
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_entr(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_erfc_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_erfc_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c38fba683edbfffc051d7969fa9f061e25e9690d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_erfc_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor special_erfc(const at::Tensor & self); +TORCH_API at::Tensor & special_erfc_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_erfc_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_native.h new file mode 100644 index 0000000000000000000000000000000000000000..20b4fe4450171d664c95f45083cbfb41a1b7c824 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_special_hermite_polynomial_h_out : public at::meta::structured_special_hermite_polynomial_h { +void impl(const at::Tensor & x, const at::Tensor & n, const at::Tensor & out); +}; +TORCH_API at::Tensor special_hermite_polynomial_h(const at::Scalar & x, const at::Tensor & n); +TORCH_API at::Tensor & special_hermite_polynomial_h_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out); +TORCH_API at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Scalar & n); +TORCH_API at::Tensor & special_hermite_polynomial_h_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9d2af42765c6e392d21674be88394e04fcf24503 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..00e4d99047fce7b296a4d9db81564ad3d6746e3d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/std_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/std_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..57afa6d2238732558b388d6be8e2471d7f34eb4f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/std_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, const ::std::optional<at::Scalar> & correction=::std::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, const ::std::optional<at::Scalar> & correction=::std::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
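The `std` overloads above follow the `std.correction` schema (optional `dim` list, optional `correction` scalar, `keepdim`). A sketch of the corresponding public `at::std` call, assuming the optional-correction overload shown above:

#include <ATen/ATen.h>

at::Tensor t = at::randn({4, 8});
// Standard deviation over dim 1 with Bessel's correction (correction=1),
// keeping the reduced dimension so `s` has shape [4, 1].
at::Tensor s = at::std(t, /*dim=*/{1}, /*correction=*/1, /*keepdim=*/true);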
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tril_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tril_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0fbfeb0c4047681ade10cf61c1408749b6fdece2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tril_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor tril(const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & tril_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & tril_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out);
+TORCH_API at::Tensor & tril_(at::Tensor & self, int64_t diagonal=0);
+
+} // namespace cuda
+} // namespace at
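Usage of the `tril` family declared above is straightforward; a brief sketch:

#include <ATen/ATen.h>

at::Tensor m = at::ones({3, 3});
at::Tensor lower = at::tril(m);                 // zeros everything above the main diagonal
at::Tensor wider = at::tril(m, /*diagonal=*/1); // also keeps the first superdiagonal
m.tril_();                                      // in-place variant on the Tensor itself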
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unfold_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unfold_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..4022905fd3f435809b7618775556ac9ee7579116
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unfold_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/unfold_backward_ops.h>
+
+namespace at {
+
+
+// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
+inline at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step);
+  }
+}
+
+// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor
+inline at::Tensor unfold_backward_symint(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor unfold_backward(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
+  }
+}
+
+// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
+  }
+}
+
+// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
+    return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
+    return at::_ops::unfold_backward_out::call(grad_in, c10::fromIntArrayRefSlow(input_sizes), dim, size, step, out);
+  }
+}
+
+// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & unfold_backward_symint_out(at::Tensor & out, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+    return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
+  }
+}
+
+// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & unfold_backward_symint_outf(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
+    return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
+    return at::_ops::unfold_backward_out::call(grad_in, input_sizes, dim, size, step, out);
+  }
+}
+
+}
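`unfold_backward` is the gradient of `Tensor::unfold`: it scatter-adds a gradient over the extracted windows back to the input's shape. A sketch with illustrative sizes:

#include <ATen/ATen.h>

at::Tensor x = at::randn({10});
// Four windows of size 4 with step 2 -> shape [4, 4].
at::Tensor patches = x.unfold(/*dimension=*/0, /*size=*/4, /*step=*/2);
// Scatter a gradient w.r.t. `patches` back to x's shape [10].
at::Tensor gx = at::unfold_backward(at::ones_like(patches), /*input_sizes=*/{10},
                                    /*dim=*/0, /*size=*/4, /*step=*/2);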
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unfold_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unfold_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..568495b7acbb85a6c03bdacc70da5259f14368ef
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unfold_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unique_consecutive.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unique_consecutive.h
new file mode 100644
index 0000000000000000000000000000000000000000..d476d777bbb6fe31b75d8dbfce643b857b5cce80
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unique_consecutive.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/unique_consecutive_ops.h>
+
+namespace at {
+
+
+// aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive(const at::Tensor & self, bool return_inverse=false, bool return_counts=false, ::std::optional<int64_t> dim=::std::nullopt) {
+    return at::_ops::unique_consecutive::call(self, return_inverse, return_counts, dim);
+}
+
+// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool return_inverse=false, bool return_counts=false, ::std::optional<int64_t> dim=::std::nullopt) {
+    return at::_ops::unique_consecutive_out::call(self, return_inverse, return_counts, dim, out0, out1, out2);
+}
+// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_outf(const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::unique_consecutive_out::call(self, return_inverse, return_counts, dim, out0, out1, out2);
+}
+
+}
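A sketch of the public `unique_consecutive` wrapper declared above; note it only merges adjacent duplicate runs, unlike `unique`:

#include <ATen/ATen.h>

at::Tensor t = at::tensor({int64_t(1), int64_t(1), int64_t(2), int64_t(2), int64_t(3), int64_t(1)});
auto [values, inverse, counts] =
    at::unique_consecutive(t, /*return_inverse=*/true, /*return_counts=*/true);
// values == [1, 2, 3, 1], counts == [2, 2, 1, 1]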
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unique_consecutive_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unique_consecutive_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..110b7b04aa345ee4a260fe82ed62908891e4b377
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unique_consecutive_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API unique_consecutive {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, bool, bool, ::std::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unique_consecutive")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim);
+};
+
+struct TORCH_API unique_consecutive_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, bool, bool, ::std::optional<int64_t>, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unique_consecutive")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, ::std::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+};
+
+}} // namespace at::_ops
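The `call` entry point declared above is what the `at::unique_consecutive` wrapper forwards to; invoking the operator object directly is equivalent:

#include <ATen/ATen.h>

at::Tensor t = at::tensor({int64_t(1), int64_t(1), int64_t(2)});
auto result = at::_ops::unique_consecutive::call(
    t, /*return_inverse=*/false, /*return_counts=*/false, /*dim=*/::std::nullopt);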
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..992dd566bc7bc0d9f04c42d19b862eaeb275d52b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor upsample_bicubic2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & upsample_bicubic2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_nearest3d_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_nearest3d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d0686854fa67bc7cc96570e982e4f909e646099d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_nearest3d_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_nearest3d_meta.h>
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor upsample_nearest3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, ::std::optional<at::ArrayRef<double>> scale_factors);
+struct TORCH_API structured_upsample_nearest3d_out_cpu : public at::meta::structured_upsample_nearest3d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & out);
+};
+struct TORCH_API structured_upsample_nearest3d_out_cuda : public at::meta::structured_upsample_nearest3d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & out);
+};
+TORCH_API at::Tensor upsample_nearest3d_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional<double> scales_d=::std::nullopt, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+} // namespace native
+} // namespace at
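Among the declarations above, `upsample_nearest3d` is the user-facing one; it expects a 5-D NCDHW input. A sketch using the output-size overload of the public `at::upsample_nearest3d`:

#include <ATen/ATen.h>

at::Tensor v = at::randn({1, 1, 2, 4, 4});   // N, C, D, H, W
// Nearest-neighbor upsampling of the three spatial dims to [4, 8, 8].
at::Tensor up = at::upsample_nearest3d(v, /*output_size=*/{4, 8, 8});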
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_real_copy_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_real_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bece11bea2fb2b99817c029d3445af19abbca081
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_real_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & view_as_real_copy_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor view_as_real_copy(const at::Tensor & self);
+} // namespace native
+} // namespace at
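A sketch of `view_as_real_copy` on a complex input; unlike `at::view_as_real`, the `_copy` variant materializes a new tensor rather than returning a view:

#include <ATen/ATen.h>

at::Tensor z = at::randn({3}, at::kComplexFloat);
// Result has shape [3, 2]; the trailing dimension holds (real, imag).
at::Tensor r = at::view_as_real_copy(z);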