diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_add_batch_dim_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_add_batch_dim_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a86ea943ffbbf2081b18aaf6a6e639e73e869d12
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_add_batch_dim_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _add_batch_dim {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_add_batch_dim")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t batch_dim, int64_t level);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t batch_dim, int64_t level);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_autocast_to_full_precision.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_autocast_to_full_precision.h
new file mode 100644
index 0000000000000000000000000000000000000000..0fb09a617c20e838afe772dcb5bf4b20473f23aa
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_autocast_to_full_precision.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_autocast_to_full_precision_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Int.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Int.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ba69a2b274439d4198311e869424f3421711218
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Int.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cast_Int_ops.h>
+
+namespace at {
+
+
+// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
+inline at::Tensor _cast_Int(const at::Tensor & self, bool non_blocking=false) {
+    return at::_ops::_cast_Int::call(self, non_blocking);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_copy.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..9a231ccbffa2ac33e6ac87d5b880869b99f7a40f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_copy.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_conj_copy_ops.h>
+
+namespace at {
+
+
+// aten::_conj_copy(Tensor self) -> Tensor
+inline at::Tensor _conj_copy(const at::Tensor & self) {
+    return at::_ops::_conj_copy::call(self);
+}
+
+// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _conj_copy_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::_conj_copy_out::call(self, out);
+}
+// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _conj_copy_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::_conj_copy_out::call(self, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_copy_from_and_resize_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_copy_from_and_resize_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..173a0558a04211badc4213b7f1cce5d96cb5d48c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_copy_from_and_resize_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _copy_from_and_resize {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_copy_from_and_resize")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & dst);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst);
+};
+
+struct TORCH_API _copy_from_and_resize_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_copy_from_and_resize")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & dst, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_abs.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_abs.h
new file mode 100644
index 0000000000000000000000000000000000000000..78c9ad9e530581c08111542a58ad61d0923e89d3
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_abs.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_abs_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_abs(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_abs(at::TensorList self) {
+    return at::_ops::_foreach_abs::call(self);
+}
+
+// aten::_foreach_abs_(Tensor(a!)[] self) -> ()
+inline void _foreach_abs_(at::TensorList self) {
+    return at::_ops::_foreach_abs_::call(self);
+}
+
+// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_abs_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_abs_out::call(self, out);
+}
+// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_abs_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_abs_out::call(self, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_div_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_div_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a97528c8505429e4d9ef7d040bbc9a55196397c6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_div_ops.h
@@ -0,0 +1,149 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_div_Scalar {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Scalar & scalar);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_div__Scalar {
+  using schema = void (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar);
+};
+
+struct TORCH_API _foreach_div_List {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::TensorList other);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other);
+};
+
+struct TORCH_API _foreach_div__List {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()")
+  static void call(at::TensorList self, at::TensorList other);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other);
+};
+
+struct TORCH_API _foreach_div_ScalarList {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_div__ScalarList {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+};
+
+struct TORCH_API _foreach_div_Tensor {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.Tensor(Tensor[] self, Tensor other) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Tensor & other);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other);
+};
+
+struct TORCH_API _foreach_div__Tensor {
+  using schema = void (at::TensorList, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div_.Tensor(Tensor(a!)[] self, Tensor other) -> ()")
+  static void call(at::TensorList self, const at::Tensor & other);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other);
+};
+
+struct TORCH_API _foreach_div_Scalar_out {
+  using schema = void (at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+};
+
+struct TORCH_API _foreach_div_List_out {
+  using schema = void (at::TensorList, at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "List_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList other, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out);
+};
+
+struct TORCH_API _foreach_div_ScalarList_out {
+  using schema = void (at::TensorList, at::ArrayRef<at::Scalar>, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "ScalarList_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+};
+
+struct TORCH_API _foreach_div_Tensor_out {
+  using schema = void (at::TensorList, const at::Tensor &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_div")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_div.Tensor_out(Tensor[] self, Tensor other, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Tensor & other, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & other, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_pow_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_pow_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..94f1fd1e8ed3ae2330bb4a0c8eaf8878632d4289
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_pow_native.h
@@ -0,0 +1,37 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_list_kernel_slow(at::TensorList self, at::TensorList exponent);
+TORCH_API void _foreach_pow_List_out(at::TensorList self, at::TensorList exponent, at::TensorList out);
+TORCH_API void foreach_tensor_pow_list_kernel_slow_(at::TensorList self, at::TensorList exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_list_kernel_cuda(at::TensorList self, at::TensorList exponent);
+TORCH_API void foreach_tensor_pow_list_kernel_cuda_(at::TensorList self, at::TensorList exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_scalar_kernel_slow(at::TensorList self, const at::Scalar & exponent);
+TORCH_API void _foreach_pow_Scalar_out(at::TensorList self, const at::Scalar & exponent, at::TensorList out);
+TORCH_API void foreach_tensor_pow_scalar_kernel_slow_(at::TensorList self, const at::Scalar & exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_scalar_kernel_cuda(at::TensorList self, const at::Scalar & exponent);
+TORCH_API void foreach_tensor_pow_scalar_kernel_cuda_(at::TensorList self, const at::Scalar & exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_scalarlist_kernel_slow(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+TORCH_API void _foreach_pow_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out);
+TORCH_API void foreach_tensor_pow_scalarlist_kernel_slow_(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_pow_scalarlist_kernel_cuda(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+TORCH_API void foreach_tensor_pow_scalarlist_kernel_cuda_(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_scalar_pow_list_kernel_slow(const at::Scalar & self, at::TensorList exponent);
+TORCH_API ::std::vector<at::Tensor> foreach_scalar_pow_list_kernel_cuda(const at::Scalar & self, at::TensorList exponent);
+} // namespace native
+} // namespace at
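> Reviewer note (not part of the patch): the `*_native.h` header above only declares the backend kernels, with the `_slow` CPU fallback and `_cuda` variants side by side; user code goes through the dispatcher entry point, which picks one based on the inputs' device. A sketch under that assumption, using `at::_foreach_pow` (declared in its own generated `Function.h` header, not shown in this diff):

```cpp
#include <ATen/ATen.h>

int main() {
  std::vector<at::Tensor> self = {at::rand({4}), at::rand({4})};
  std::vector<at::Tensor> exp  = {at::full({4}, 2.0), at::full({4}, 3.0)};
  // On CPU tensors this routes to foreach_tensor_pow_list_kernel_slow;
  // on CUDA tensors, to foreach_tensor_pow_list_kernel_cuda.
  std::vector<at::Tensor> out = at::_foreach_pow(self, exp);
  return 0;
}
```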
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_index_put_impl_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_index_put_impl_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2380c013237b940b8103f9bfe4cfa5c0875e74fa
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_index_put_impl_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8779ef3497b24aa118f5c4e1928b339d26798917
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _make_per_channel_quantized_tensor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis);
+TORCH_API at::Tensor & _make_per_channel_quantized_tensor_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_reshape_alias_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_reshape_alias_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c09e7f54ba192b7b8694bdedc5b957c853951fc2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_reshape_alias_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride);
+TORCH_API at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_safe_softmax.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_safe_softmax.h
new file mode 100644
index 0000000000000000000000000000000000000000..35c4df01c99270388bb18d4534c419a3d433f6e7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_safe_softmax.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_safe_softmax_ops.h>
+
+namespace at {
+
+
+// aten::_safe_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+inline at::Tensor _safe_softmax(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
+    return at::_ops::_safe_softmax::call(self, dim, dtype);
+}
+
+}
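> Reviewer note (not part of the patch): `_safe_softmax` is the softmax variant used internally by scaled-dot-product attention; to my understanding it returns zeros rather than NaN for rows that are fully masked (all `-inf`). A minimal call sketch, assuming a standard libtorch setup:

```cpp
#include <ATen/ATen.h>
#include <ATen/ops/_safe_softmax.h>

int main() {
  at::Tensor logits = at::randn({2, 5});
  // dim is a plain int64_t; dtype defaults to nullopt (no upcast).
  at::Tensor probs = at::_safe_softmax(logits, /*dim=*/-1);
  return 0;
}
```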
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5dbd50277390e97d1a820159f76c8f7e1919ebb1
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _slow_conv2d_backward_grad_input {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_slow_conv2d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias);
+};
+
+struct TORCH_API _slow_conv2d_backward_output_mask {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, ::std::array<bool,3>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_slow_conv2d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "output_mask")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask);
+};
+
+struct TORCH_API _slow_conv2d_backward_output_mask_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, ::std::array<bool,3>, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_slow_conv2d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "output_mask_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mm_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mm_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4b0a50a789010f82b2d6cb3e51d225ad02de25a6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mm_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense);
+TORCH_API at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..51ad69c01007f968798b6a9a394ae92c021d0283
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward_sparse_csr_cpu(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_semi_structured_tile_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_semi_structured_tile_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a76c27fbf4bbdc0f6a922c556630456d520462e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_semi_structured_tile_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_semi_structured_tile {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, c10::string_view, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_semi_structured_tile")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_semi_structured_tile(Tensor input, str algorithm=\"\", bool use_cutlass=True) -> (Tensor, Tensor, Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, c10::string_view algorithm, bool use_cutlass);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::string_view algorithm, bool use_cutlass);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_string_default.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_string_default.h
new file mode 100644
index 0000000000000000000000000000000000000000..b66d90b21246ccbbc23de03cb37da576ae1d7e2c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_string_default.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_test_string_default_ops.h>
+
+namespace at {
+
+
+// aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
+inline at::Tensor _test_string_default(const at::Tensor & dummy, c10::string_view a="\"'\\", c10::string_view b="\"'\\") {
+    return at::_ops::_test_string_default::call(dummy, a, b);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_to_sparse_bsc_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_to_sparse_bsc_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c0138be58f0b226d3cce3915166067df71f9c6da
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_to_sparse_bsc_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _to_sparse_bsc(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim=::std::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_unsafe_index_put_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_unsafe_index_put_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..34544b42168f34764d10a3c8ba92377a135d919d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_unsafe_index_put_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _unsafe_index_put {
+  using schema = at::Tensor (const at::Tensor &, const c10::List<::std::optional<at::Tensor>> &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unsafe_index_put")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate);
+};
+
+}} // namespace at::_ops
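> Reviewer note (not part of the patch): the `Tensor?[]` schema above maps to `c10::List<::std::optional<at::Tensor>>` in C++, where `nullopt` in a slot means "take that dimension whole", like `:` in Python indexing. A sketch using the public `at::index_put` (the `_unsafe_` variant declared above is the internal, shape-check-free version used by the compiler stack):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::zeros({3, 4});
  at::Tensor rows = at::tensor({0, 2});

  c10::List<::std::optional<at::Tensor>> indices;
  indices.push_back(rows);            // dim 0: rows 0 and 2
  indices.push_back(::std::nullopt);  // dim 1: all columns

  at::Tensor v = at::ones({2, 4});
  t = at::index_put(t, indices, v, /*accumulate=*/false);
  return 0;
}
```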
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8021af94b542eaf44545ec6edb7b70547f13ff1b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _upsample_bicubic2d_aa(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors);
+TORCH_API at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9dceb77ad863750bba3670278c20dba56164380c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _upsample_bicubic2d_aa_vec {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, bool, ::std::optional<at::ArrayRef<double>>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_bicubic2d_aa")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors);
+};
+
+struct TORCH_API _upsample_bicubic2d_aa_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, bool, ::std::optional<double>, ::std::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_bicubic2d_aa")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out);
+};
+
+struct TORCH_API _upsample_bicubic2d_aa {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool, ::std::optional<double>, ::std::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_upsample_bicubic2d_aa")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_validate_sparse_bsr_tensor_args_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_validate_sparse_bsr_tensor_args_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5494e20f03c05c5a2dfe87d5e8a14ad4b428913
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_validate_sparse_bsr_tensor_args_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _validate_sparse_bsr_tensor_args {
+  using schema = void (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_validate_sparse_bsr_tensor_args")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()")
+  static void call(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/add_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/add_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..935b184402a06f99844e2b8088dea574ec6ed0c1
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/add_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor add(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & add_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & add_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcdiv.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcdiv.h
new file mode 100644
index 0000000000000000000000000000000000000000..2715eb8ba9b5831f4b4885db9c16278e601e5f93
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcdiv.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/addcdiv_ops.h>
+
+namespace at {
+
+
+// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & addcdiv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
+    return at::_ops::addcdiv_out::call(self, tensor1, tensor2, value, out);
+}
+// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & addcdiv_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
+    return at::_ops::addcdiv_out::call(self, tensor1, tensor2, value, out);
+}
+
+// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
+inline at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
+    return at::_ops::addcdiv::call(self, tensor1, tensor2, value);
+}
+
+}
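> Reviewer note (not part of the patch): `addcdiv.h` above is a good example of the codegen's two out-variant spellings, which both forward to the same `at::_ops::addcdiv_out::call`. A minimal sketch, assuming a standard libtorch setup:

```cpp
#include <ATen/ATen.h>
#include <ATen/ops/addcdiv.h>

int main() {
  at::Tensor self = at::zeros({3});
  at::Tensor t1 = at::rand({3});
  at::Tensor t2 = at::rand({3}) + 1.0;  // keep the divisor nonzero
  at::Tensor out = at::empty({3});

  // `_out`: out parameter first, trailing defaults usable.
  at::addcdiv_out(out, self, t1, t2);
  // `_outf`: exact schema order, out parameter last, no defaults.
  at::addcdiv_outf(self, t1, t2, /*value=*/1, out);
  return 0;
}
```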
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcmul_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcmul_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..42cf40440ed6389404c32ba0acf214e0a2fbe538
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addcmul_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/aminmax_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/aminmax_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7758a4494e03179b03282bc993706fa892649b07
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/aminmax_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/aminmax_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_aminmax_out : public at::meta::structured_aminmax {
+void impl(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, const at::Tensor & min, const at::Tensor & max);
+};
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bernoulli_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bernoulli_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fefa46d4cde12302331acb659d0f4de6de7614fe
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bernoulli_meta_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & bernoulli_(at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator=::std::nullopt);
+TORCH_API at::Tensor & bernoulli_(at::Tensor & self, double p=0.5, ::std::optional<at::Generator> generator=::std::nullopt);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_and.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_and.h
new file mode 100644
index 0000000000000000000000000000000000000000..97b11aff3fd10ad62416a6927cc2c78562c691e5
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_and.h
@@ -0,0 +1,67 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/bitwise_and_ops.h>
+
+namespace at {
+
+
+// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bitwise_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::bitwise_and_Tensor_out::call(self, other, out);
+}
+// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bitwise_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::bitwise_and_Tensor_out::call(self, other, out);
+}
+
+// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bitwise_and_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::bitwise_and_Scalar_out::call(self, other, out);
+}
+// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bitwise_and_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::bitwise_and_Scalar_out::call(self, other, out);
+}
+
+// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor bitwise_and(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::bitwise_and_Scalar::call(self, other);
+}
+
+// aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor
+inline at::Tensor bitwise_and(const at::Scalar & self, const at::Tensor & other) {
+    return at::_ops::bitwise_and_Scalar_Tensor::call(self, other);
+}
+
+// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor bitwise_and(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::bitwise_and_Tensor::call(self, other);
+}
+
+// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bitwise_and_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
+    return at::_ops::bitwise_and_Scalar_Tensor_out::call(self, other, out);
+}
+// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bitwise_and_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::bitwise_and_Scalar_Tensor_out::call(self, other, out);
+}
+
+}
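> Reviewer note (not part of the patch): `bitwise_and.h` shows how one schema name fans out into `Tensor/Tensor`, `Tensor/Scalar`, and `Scalar/Tensor` C++ overloads; ordinary overload resolution picks among them. A sketch, assuming integral tensors (bitwise ops reject floating-point inputs):

```cpp
#include <ATen/ATen.h>
#include <ATen/ops/bitwise_and.h>

int main() {
  at::Tensor x = at::tensor({0b1100, 0b1010});  // int64 tensor
  at::Tensor y = at::tensor({0b0110, 0b0110});

  at::Tensor t = at::bitwise_and(x, y);                    // Tensor & Tensor
  at::Tensor s = at::bitwise_and(x, 0b1000);               // Tensor & Scalar
  at::Tensor r = at::bitwise_and(at::Scalar(0b1000), y);   // Scalar & Tensor
  return 0;
}
```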
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor chalf(const at::Tensor & self, ::std::optional memory_format=::std::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv1d_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv1d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4e3654d086a00e584763e60e2c9ae32a3686af69 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv1d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1); +TORCH_API at::Tensor conv1d_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1); +TORCH_API at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1); +TORCH_API at::Tensor conv1d_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2a6c58eb1bf531c19f565f909e9b382a46dc24eb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API cudnn_affine_grid_generator_backward { + using schema = at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_affine_grid_generator_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta") + static at::Tensor call(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W); +}; + +struct TORCH_API cudnn_affine_grid_generator_backward_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, int64_t, int64_t, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_affine_grid_generator_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..72be7fddc50886b4eabb68b352ffe2401485f7dd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_convolution_relu_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
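+// Illustrative usage (sketch, not part of the generated file; CUDA-only, assumes
+// `input` and `weight` are CUDA tensors and cuDNN is available):
+//   at::Tensor y = at::cudnn_convolution_relu(
+//       input, weight, /*bias=*/::std::nullopt,
+//       /*stride=*/{1, 1}, /*padding=*/{0, 0}, /*dilation=*/{1, 1}, /*groups=*/1);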
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor cudnn_convolution_relu(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups); +TORCH_API at::Tensor cudnn_convolution_relu_symint(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dequantize_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dequantize_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c49e74c3f1fe439bedcdb8a67ed2abd9c57c20c9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dequantize_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor & dequantize_self_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor dequantize_cpu_or_cuda(const at::Tensor & self); +TORCH_API at::Tensor dequantize_quantized(const at::Tensor & self); +TORCH_API void dequantize_tensors_out(at::TensorList tensors, at::TensorList out); +TORCH_API ::std::vector<at::Tensor> dequantize_tensors_quantized_cpu(at::TensorList tensors); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/empty_permuted.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/empty_permuted.h new file mode 100644 index 0000000000000000000000000000000000000000..88fc710e9022e6d1f7f797023253397b826ae010 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/empty_permuted.h @@ -0,0 +1,113 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/empty_permuted_ops.h> + +namespace at { + + +// aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor empty_permuted(at::IntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options={}) { + return at::_ops::empty_permuted::call(c10::fromIntArrayRefSlow(size), physical_layout, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor empty_permuted(at::IntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options={}) { + return at::_ops::empty_permuted::call(c10::fromIntArrayRefSlow(size), physical_layout, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor +inline at::Tensor empty_permuted(at::IntArrayRef size, at::IntArrayRef physical_layout, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::empty_permuted::call(c10::fromIntArrayRefSlow(size), physical_layout, dtype, layout, device, pin_memory); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor empty_permuted(at::IntArrayRef size, at::IntArrayRef physical_layout, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::empty_permuted::call(c10::fromIntArrayRefSlow(size), physical_layout, dtype, layout, device, pin_memory); + } +} + +// aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor empty_permuted_symint(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options={}) { + return at::_ops::empty_permuted::call(size, physical_layout, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor empty_permuted(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options={}) { + return at::_ops::empty_permuted::call(size, physical_layout, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor empty_permuted_symint(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::empty_permuted::call(size, physical_layout, dtype, layout, device, pin_memory); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor empty_permuted(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::empty_permuted::call(size, physical_layout, dtype, layout, device, pin_memory); + } +} + +// aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & empty_permuted_out(at::Tensor & out, at::IntArrayRef size, at::IntArrayRef physical_layout) { + return at::_ops::empty_permuted_out::call(c10::fromIntArrayRefSlow(size), physical_layout, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & empty_permuted_out(at::Tensor & out, at::IntArrayRef size, at::IntArrayRef physical_layout) { + return at::_ops::empty_permuted_out::call(c10::fromIntArrayRefSlow(size), physical_layout, out); + } +} + +// aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & empty_permuted_outf(at::IntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out) { + return at::_ops::empty_permuted_out::call(c10::fromIntArrayRefSlow(size), physical_layout, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & empty_permuted_outf(at::IntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out) { + return at::_ops::empty_permuted_out::call(c10::fromIntArrayRefSlow(size), physical_layout, out); + } +} + +// aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & empty_permuted_symint_out(at::Tensor & out, c10::SymIntArrayRef size, at::IntArrayRef physical_layout) { + return at::_ops::empty_permuted_out::call(size, physical_layout, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & empty_permuted_out(at::Tensor & out, c10::SymIntArrayRef size, at::IntArrayRef physical_layout) { + return at::_ops::empty_permuted_out::call(size, physical_layout, out); + } +} + +// aten::empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & empty_permuted_symint_outf(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out) { + return at::_ops::empty_permuted_out::call(size, physical_layout, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & empty_permuted_outf(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out) { + return at::_ops::empty_permuted_out::call(size, physical_layout, out); + } +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_irfft2_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_irfft2_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b63ce01d012daa05c6fab0dc6ed850c02853c81f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_irfft2_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
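+// Illustrative usage (sketch, not part of the generated file): inverse real 2-D
+// FFT over the last two dimensions by default:
+//   at::Tensor spec = at::fft_rfft2(at::rand({8, 8}));
+//   at::Tensor img  = at::fft_irfft2(spec);                              // -> {8, 8}
+//   at::Tensor img2 = at::fft_irfft2(spec, ::std::nullopt, {-2, -1}, "ortho");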
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor fft_irfft2(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt); +TORCH_API at::Tensor fft_irfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt); +TORCH_API at::Tensor & fft_irfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt); +TORCH_API at::Tensor & fft_irfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out); +TORCH_API at::Tensor & fft_irfft2_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt); +TORCH_API at::Tensor & fft_irfft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/frac_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/frac_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..48ceec3d83c5563c6075c3c55e913287a091e9cf --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/frac_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
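+// Illustrative semantics (sketch, not part of the generated file): frac(x) is
+// x - trunc(x), so the result keeps the sign of the input:
+//   at::Tensor f = at::frac(at::tensor({1.5, -0.75}));  // {0.5, -0.75}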
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor frac(const at::Tensor & self); +TORCH_API at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & frac_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fractional_max_pool2d_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fractional_max_pool2d_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a0bde9faeffa4cbd265687632a7e24083146f361 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fractional_max_pool2d_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_fractional_max_pool2d : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +}; + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/full_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/full_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4d85aa566cec158d40123473ea769e3c5563307e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/full_compositeexplicitautograd_dispatch.h @@ -0,0 +1,34 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
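+// Illustrative usage (sketch, not part of the generated file):
+//   at::Tensor a = at::full({2, 3}, 7.0, at::dtype(at::kFloat));  // 2x3 filled with 7
+//   at::Tensor out = at::empty({2, 3});
+//   at::full_out(out, {2, 3}, 7.0);                               // writes into `out`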
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, at::TensorOptions options={}); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names); +TORCH_API at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, at::Tensor & out); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}); +TORCH_API at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}); +TORCH_API at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value); +TORCH_API at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); +TORCH_API at::Tensor & full_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value); +TORCH_API at::Tensor & full_symint_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e3f434e64830e4609e7c6915e4fdd6c7bd0b7580 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API fused_moving_avg_obs_fake_quant { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, double, int64_t, int64_t, int64_t, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fused_moving_avg_obs_fake_quant") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!)
zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gather.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gather.h new file mode 100644 index 0000000000000000000000000000000000000000..f893171b9cac2068512d259e1dd7c7b04a00f355 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gather.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_out::call(self, dim, index, sparse_grad, out); +} +// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gather_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) { + return at::_ops::gather_out::call(self, dim, index, sparse_grad, out); +} + +// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor +inline at::Tensor gather(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather::call(self, dim, index, sparse_grad); +} + +// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gather_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_dimname_out::call(self, dim, index, sparse_grad, out); +} +// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & gather_outf(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) { + return at::_ops::gather_dimname_out::call(self, dim, index, sparse_grad, out); +} + +// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor +inline at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_dimname::call(self, dim, index, sparse_grad); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fe5129821001bc6dfbfc55640e2e699b04fe1f83 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/geometric_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/geometric_native.h new file mode 100644 index 0000000000000000000000000000000000000000..693f412a1d1b902c644a89993ff6c22fa561de9d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/geometric_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor geometric(const at::Tensor & self, double p, ::std::optional<at::Generator> generator=::std::nullopt); +TORCH_API at::Tensor & geometric_out(const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out); +TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, ::std::optional<at::Generator> generator=::std::nullopt); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_copy_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..950ddc5ffa8a174799ec617ae10372aafd0fc9bf --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_copy_ops.h @@ -0,0 +1,72 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures.
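+// Illustrative usage (sketch, not part of the generated file):
+//   at::Tensor self = at::zeros({5});
+//   at::Tensor index = at::tensor({0, 2}, at::kLong);
+//   at::Tensor source = at::tensor({1.0f, 2.0f});
+//   self.index_copy_(0, index, source);  // self is now {1, 0, 2, 0, 0}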
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API index_copy_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); +}; + +struct TORCH_API index_copy_ { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_copy_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +}; + +struct TORCH_API index_copy { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +}; + +struct TORCH_API index_copy__dimname { + using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_copy_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_copy_.dimname(Tensor(a!)
self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +}; + +struct TORCH_API index_copy_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isin.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isin.h new file mode 100644 index 0000000000000000000000000000000000000000..26ddb87828759176020ae4ac55c174abc844b85f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isin.h @@ -0,0 +1,67 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/isin_ops.h> + +namespace at { + + +// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Tensor_out::call(elements, test_elements, assume_unique, invert, out); +} +// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_outf(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Tensor_Tensor_out::call(elements, test_elements, assume_unique, invert, out); +} + +// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor +inline at::Tensor isin(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Tensor::call(elements, test_elements, assume_unique, invert); +} + +// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_out(at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Scalar_out::call(elements, test_element, assume_unique, invert, out); +} +// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & isin_outf(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Tensor_Scalar_out::call(elements, test_element, assume_unique, invert, out); +} + +// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor +inline at::Tensor isin(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Tensor_Scalar::call(elements, test_element, assume_unique, invert); +} + +// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_out(at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Scalar_Tensor_out::call(element, test_elements, assume_unique, invert, out); +} +// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & isin_outf(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) { + return at::_ops::isin_Scalar_Tensor_out::call(element, test_elements, assume_unique, invert, out); +} + +// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor +inline at::Tensor isin(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) { + return at::_ops::isin_Scalar_Tensor::call(element, test_elements, assume_unique, invert); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_tensorinv_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_tensorinv_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b71771dad48bd3997baee02183432c38e43ee1c6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_tensorinv_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_tensorinv(const at::Tensor & self, int64_t ind=2); +TORCH_API at::Tensor & linalg_tensorinv_out(const at::Tensor & self, int64_t ind, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1fa15f464916f595e51672dae099617e8501b511 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/margin_ranking_loss_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
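+// Illustrative usage (sketch, not part of the generated file): elementwise
+// max(0, -target * (input1 - input2) + margin), reduced per `reduction`:
+//   at::Tensor loss = at::margin_ranking_loss(x1, x2, target,
+//                                             /*margin=*/0.0,
+//                                             /*reduction=*/at::Reduction::Mean);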
+#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API margin_ranking_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::margin_ranking_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor") + static at::Tensor call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/masked_select_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/masked_select_native.h new file mode 100644 index 0000000000000000000000000000000000000000..915ae34ec84befc83af9f904a2092eeb8ef465b0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/masked_select_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor masked_select_cpu(const at::Tensor & self, const at::Tensor & mask); +TORCH_API at::Tensor & masked_select_out_cpu(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out); +TORCH_API at::Tensor masked_select_cuda(const at::Tensor & self, const at::Tensor & mask); +TORCH_API at::Tensor & masked_select_out_cuda(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/matmul_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/matmul_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d8b5f2f5d5da1d7bcc3b5d9b518c2376c2acdf95 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/matmul_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> matmul_backward_nested(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/matrix_power.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/matrix_power.h new file mode 100644 index 0000000000000000000000000000000000000000..a1cba85938b88206f70e6fa5476ea3400e5c9605 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/matrix_power.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/matrix_power_ops.h> + +namespace at { + + +// aten::matrix_power(Tensor self, int n) -> Tensor +inline at::Tensor matrix_power(const at::Tensor & self, int64_t n) { + return at::_ops::matrix_power::call(self, n); +} + +// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & matrix_power_out(at::Tensor & out, const at::Tensor & self, int64_t n) { + return at::_ops::matrix_power_out::call(self, n, out); +} +// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & matrix_power_outf(const at::Tensor & self, int64_t n, at::Tensor & out) { + return at::_ops::matrix_power_out::call(self, n, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9bd4d715527d294ee7182d85a264379ef27f72d9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..de0ffff2d5db05089b6c419d361c281fa8cf2458 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/mps_convolution_transpose_backward_ops.h> + +namespace at { + + +// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding,
at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + ::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask); + } +} + +// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward_symint(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + ::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask) { + return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!)
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(output_padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, output_mask, out0, out1); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!)
out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_symint_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); + } +} + +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, SymInt[] padding, SymInt[] output_padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_symint_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::mps_convolution_transpose_backward_out::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1); + } +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_layer_norm_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_layer_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..51a34f4f20ae30ef4950d27289f5b1530e4ba403 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_layer_norm_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
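+// Illustrative usage (sketch, not part of the generated file; x, weight, bias
+// assumed in scope): returns (output, mean, rstd) computed over the trailing
+// `normalized_shape` dimensions:
+//   auto [y, mean, rstd] = at::native_layer_norm(
+//       x, /*normalized_shape=*/{64}, weight, bias, /*eps=*/1e-5);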
+#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API native_layer_norm { + using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, c10::SymIntArrayRef, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_layer_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)") + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps); + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps); +}; + +struct TORCH_API native_layer_norm_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, c10::SymIntArrayRef, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, double, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_layer_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))") + static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/negative_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/negative_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2cbf842bbd0825a9f0ec4574ad7f0d066f6b0881 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/negative_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
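+// Illustrative usage (sketch, not part of the generated file): negative() is an
+// alias of neg():
+//   at::Tensor y = at::negative(x);  // same as -x
+//   x.negative_();                   // in-place variant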
+#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API negative { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::negative") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "negative(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API negative_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::negative_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "negative_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API negative_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::negative") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3bd0e7dcefd0d211ffdd142765f649fe5c0dad1f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor nll_loss_symint(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100); +TORCH_API at::Tensor & nll_loss_out(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/permute_copy.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/permute_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..ed7c9222e419e40a71509ac32e25ba60bf9f4b7f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/permute_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/permute_copy_ops.h> + +namespace at { + + +// aten::permute_copy(Tensor self, int[] dims) -> Tensor +inline at::Tensor permute_copy(const at::Tensor & self, at::IntArrayRef
dims) { + return at::_ops::permute_copy::call(self, dims); +} + +// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & permute_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::permute_copy_out::call(self, dims, out); +} +// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & permute_copy_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::permute_copy_out::call(self, dims, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/prod_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/prod_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..104a51d6d31322e32c4036d31e861f73220f5797 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/prod_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/q_per_channel_axis.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/q_per_channel_axis.h new file mode 100644 index 0000000000000000000000000000000000000000..736f371d8cc4fce310b08dcfc837148f5acde329 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/q_per_channel_axis.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/q_per_channel_axis_ops.h> + +namespace at { + + +// aten::q_per_channel_axis(Tensor self) -> int +inline int64_t q_per_channel_axis(const at::Tensor & self) { + return at::_ops::q_per_channel_axis::call(self); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/refine_names.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/refine_names.h new file mode 100644 index 0000000000000000000000000000000000000000..40448cc467889f980a348c714167644b24ec11e5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/refine_names.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/refine_names_ops.h> + +namespace at { + + + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/replication_pad2d_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/replication_pad2d_backward_native.h new file mode 100644 index
0000000000000000000000000000000000000000..50681b30bbb12f11b7a62a206890f0452cf7d6bc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/replication_pad2d_backward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor replication_pad2d_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & replication_pad2d_backward_out_cpu(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input); +TORCH_API at::Tensor replication_pad2d_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & replication_pad2d_backward_out_cuda(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/replication_pad3d_backward_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/replication_pad3d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..608763284f8db350b9677e6e5ebb6e401fd1cca2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/replication_pad3d_backward_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor replication_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor replication_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & replication_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input); +TORCH_API at::Tensor & replication_pad3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad3d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/result_type_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/result_type_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..88b3b769d246f5f2f30e10b1b4a5ad4fc8c30512 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/result_type_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API result_type_Tensor { + using schema = at::ScalarType (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::result_type") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType") + static at::ScalarType call(const at::Tensor & tensor, const at::Tensor & other); + static at::ScalarType redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & tensor, const at::Tensor & other); +}; + +struct TORCH_API result_type_Scalar { + using schema = at::ScalarType (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::result_type") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType") + static at::ScalarType call(const at::Tensor & tensor, const at::Scalar & other); + static at::ScalarType redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & tensor, const at::Scalar & other); +}; + +struct TORCH_API result_type_Scalar_Tensor { + using schema = at::ScalarType (const at::Scalar &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::result_type") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor") +
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType") + static at::ScalarType call(const at::Scalar & scalar, const at::Tensor & tensor); + static at::ScalarType redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & scalar, const at::Tensor & tensor); +}; + +struct TORCH_API result_type_Scalar_Scalar { + using schema = at::ScalarType (const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::result_type") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType") + static at::ScalarType call(const at::Scalar & scalar1, const at::Scalar & scalar2); + static at::ScalarType redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & scalar1, const at::Scalar & scalar2); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/segment_reduce_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/segment_reduce_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5543d4c9de0773bf4860dd305132f5e26b0bb066 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/segment_reduce_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor & segment_reduce_out(const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths, const ::std::optional<at::Tensor> & indices, const ::std::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const ::std::optional<at::Scalar> & initial, at::Tensor & out); +TORCH_API at::Tensor segment_reduce_kernel(const at::Tensor & data, c10::string_view reduce, const ::std::optional<at::Tensor> & lengths={}, const ::std::optional<at::Tensor> & indices={}, const ::std::optional<at::Tensor> & offsets={}, int64_t axis=0, bool unsafe=false, const ::std::optional<at::Scalar> & initial=::std::nullopt); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/set_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/set_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..13f69245b630f17899fc3ae9884b4f2de4233469 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/set_compositeexplicitautograd_dispatch.h @@ -0,0 +1,37 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor set(const at::Tensor & self, at::Storage source); +TORCH_API at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source); +TORCH_API at::Tensor & set_outf(const at::Tensor & self, at::Storage source, at::Tensor & out); +TORCH_API at::Tensor set(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}); +TORCH_API at::Tensor set_symint(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}); +TORCH_API at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}); +TORCH_API at::Tensor & set_outf(const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out); +TORCH_API at::Tensor & set_symint_out(at::Tensor & out, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}); +TORCH_API at::Tensor & set_symint_outf(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out); +TORCH_API at::Tensor set(const at::Tensor & self, const at::Tensor & source); +TORCH_API at::Tensor & set_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & source); +TORCH_API at::Tensor & set_outf(const at::Tensor & self, const at::Tensor & source, at::Tensor & out); +TORCH_API at::Tensor set(const at::Tensor & self); +TORCH_API at::Tensor & set_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & set_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slice_copy_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slice_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4ac4b1a9bed6ff5fbfaa0aa467859a446b8db3da --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slice_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & slice_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, ::std::optional<int64_t> start=::std::nullopt, ::std::optional<int64_t> end=::std::nullopt, int64_t step=1); +TORCH_API at::Tensor & slice_copy_outf(const at::Tensor & self, int64_t dim, ::std::optional<int64_t> start, ::std::optional<int64_t> end, int64_t step, at::Tensor & out); +TORCH_API at::Tensor & slice_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, ::std::optional<c10::SymInt> start=::std::nullopt, ::std::optional<c10::SymInt> end=::std::nullopt, c10::SymInt step=1); +TORCH_API at::Tensor & slice_copy_symint_outf(const at::Tensor & self, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_erfcx_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_erfcx_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6081da479a861a371eb8e4590dae7f12f96cc295 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_erfcx_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_erfcx(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i0e_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i0e_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a8a26657c9a386c4af215cc7c419b839fede86af --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i0e_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/special_i0e_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_special_i0e_out : public at::meta::structured_special_i0e { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_log_softmax_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_log_softmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..213b512b790a359de3ddb5231145459955ddce19 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_log_softmax_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API special_log_softmax { + using schema = at::Tensor (const at::Tensor &, int64_t, ::std::optional<at::ScalarType>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_log_softmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/split_with_sizes_copy_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/split_with_sizes_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a5c756bb9dd3d57031787b33210ce791b98aa3ed --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/split_with_sizes_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API split_with_sizes_copy { + using schema = ::std::vector<at::Tensor> (const at::Tensor &, c10::SymIntArrayRef, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::split_with_sizes_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]") + static ::std::vector<at::Tensor> call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim); + static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim); +}; + +struct TORCH_API split_with_sizes_copy_out { + using schema = void (const at::Tensor &, c10::SymIntArrayRef, int64_t, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::split_with_sizes_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()") + static void call(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tan_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tan_cuda_dispatch.h new file mode 100644 index
0000000000000000000000000000000000000000..18daddc556443931ec489041fcf5c5481b1b74f8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tan_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor tan(const at::Tensor & self); +TORCH_API at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & tan_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unique_dim_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unique_dim_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e737debc10d916e66acc6a9e83aeb467bba118b2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unique_dim_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim(const at::Tensor & self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unsqueeze_copy.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unsqueeze_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..25de76f916be8aae14d7a2176a74e9668b8b2f4a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unsqueeze_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/unsqueeze_copy_ops.h> + +namespace at { + + +// aten::unsqueeze_copy(Tensor self, int dim) -> Tensor +inline at::Tensor unsqueeze_copy(const at::Tensor & self, int64_t dim) { + return at::_ops::unsqueeze_copy::call(self, dim); +} + +// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & unsqueeze_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim) { + return at::_ops::unsqueeze_copy_out::call(self, dim, out); +} +// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & unsqueeze_copy_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) { + return at::_ops::unsqueeze_copy_out::call(self, dim, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_complex.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..b3f2044dd5a6fa40c5918e5d77ebd73c4f906691 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_complex.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/view_as_complex_ops.h> + +namespace at { + + +// aten::view_as_complex(Tensor(a) self) -> Tensor(a) +inline at::Tensor view_as_complex(const at::Tensor & self) { + return at::_ops::view_as_complex::call(self); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9054bc9e2eeb9cbabe83cd443455ad272907d533 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & view_as_real_copy_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & view_as_real_copy_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_real_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_real_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d830e8bb2aa6e3b6f518e3168899d50c72a416c3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_real_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor view_as_real(const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/zeros_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/zeros_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..88da4fff1364ed77f89cca67cdd95430a61a22e7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/zeros_compositeexplicitautograd_dispatch.h @@ -0,0 +1,34 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor zeros(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options={}); +TORCH_API at::Tensor zeros(at::IntArrayRef size, ::std::optional<at::DimnameList> names, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::DimnameList> names); +TORCH_API at::Tensor & zeros_outf(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out); +TORCH_API at::Tensor zeros(at::IntArrayRef size, at::TensorOptions options={}); +TORCH_API at::Tensor zeros(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options={}); +TORCH_API at::Tensor zeros_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory); +TORCH_API at::Tensor & zeros_out(at::Tensor & out, at::IntArrayRef size); +TORCH_API at::Tensor & zeros_outf(at::IntArrayRef size, at::Tensor & out); +TORCH_API at::Tensor & zeros_symint_out(at::Tensor & out, c10::SymIntArrayRef size); +TORCH_API at::Tensor & zeros_symint_outf(c10::SymIntArrayRef size, at::Tensor & out); + +} // namespace compositeexplicitautograd
} // namespace at
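The headers above are torchgen-generated declarations and are not meant to be edited by hand; user code reaches them through the public at:: entry points rather than the at::_ops or per-dispatch-key namespaces. As a quick sanity check of the C++ surface these files declare, here is a minimal usage sketch; it is not part of the diff, assumes a working libtorch build with standard include paths and linking, and is illustrative only.

// Minimal sketch: exercises a few operators declared in the headers above
// (zeros, permute_copy, unsqueeze_copy, and the result_type.Scalar overload)
// through the stable at:: API. Error handling omitted for brevity.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor x = at::zeros({2, 3});             // zeros(size, ...) -> Tensor
  at::Tensor y = at::permute_copy(x, {1, 0});   // permute_copy(Tensor self, int[] dims)
  at::Tensor z = at::unsqueeze_copy(y, 0);      // unsqueeze_copy(Tensor self, int dim)
  at::ScalarType st = at::result_type(x, 1.5);  // Scalar overload: promotes against a double
  std::cout << z.sizes() << " " << st << std::endl;  // expect [1, 3, 2] Float
  return 0;
}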