diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_add_relu_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_add_relu_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6898e811058945196b90d808831ffa71eb8eac37 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_add_relu_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & _add_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & _add_relu_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_aminmax_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_aminmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..331f98c2ae17037a83177ee294993abb5f973167 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_aminmax_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _aminmax { + using schema = ::std::tuple (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_aminmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_aminmax(Tensor self) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & self); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API _aminmax_dim { + using schema = ::std::tuple (const at::Tensor &, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_aminmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & self, int64_t dim, bool keepdim); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim); +}; + +struct TORCH_API _aminmax_out { + using schema = ::std::tuple (const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_aminmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & self, at::Tensor & out0, at::Tensor & out1); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out0, at::Tensor & out1); +}; + +struct TORCH_API _aminmax_dim_out { + using schema = ::std::tuple (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_aminmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cummax_helper_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cummax_helper_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f8d9b80ec60841285ac03f2d80e3d72b773c712b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cummax_helper_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
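// A minimal sketch, not part of the generated header above, of how these at::_ops
// structs are used: name/overload_name/schema_str are compile-time metadata for the
// operator, call() goes through the dispatcher, and redispatch() re-enters dispatch
// below an explicit DispatchKeySet (as a kernel wrapper would). Tensor contents
// here are illustrative only.
#include <ATen/ATen.h>
#include <ATen/ops/_aminmax_ops.h>

void aminmax_ops_sketch() {
  at::Tensor self = at::randn({2, 3});
  // Fully dispatched call; equivalent to calling the public at::_aminmax(self, 1, false) wrapper.
  auto result = at::_ops::_aminmax_dim::call(self, /*dim=*/1, /*keepdim=*/false);
  at::Tensor min_vals = std::get<0>(result);
  at::Tensor max_vals = std::get<1>(result);
}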
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _cummax_helper { + using schema = void (const at::Tensor &, at::Tensor &, at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cummax_helper") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()") + static void call(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim); + static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cummin_helper_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cummin_helper_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..026f8c161000059569d64e0a795ba6d403f57240 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cummin_helper_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API void _cummin_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f5bba09c28ab025abe774d899e68f19924a7f914 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & _dirichlet_grad_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total); +TORCH_API at::Tensor & _dirichlet_grad_outf(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_embedding_bag_forward_only_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_embedding_bag_forward_only_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1e586bcf5a60f95a0769df00608772270cdc2afc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_embedding_bag_forward_only_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _embedding_bag_forward_only(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const ::std::optional & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..236ad3e09c3af39d54d204f9dd275acdf95a8a2c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _fake_quantize_per_tensor_affine_cachemask_tensor_qparams { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)") + static ::std::tuple call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max); +}; + +struct TORCH_API _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foobar.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foobar.h new file mode 100644 index 0000000000000000000000000000000000000000..3471ab1dd447a6f9bac485e0ae1c871a99c94ceb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foobar.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor +inline at::Tensor _foobar(const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) { + return at::_ops::_foobar::call(self, arg1, arg2, arg3); +} + +// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _foobar_out(at::Tensor & out, const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) { + return at::_ops::_foobar_out::call(self, arg1, arg2, arg3, out); +} +// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _foobar_outf(const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) { + return at::_ops::_foobar_out::call(self, arg1, arg2, arg3, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_jagged_to_padded_dense_forward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_jagged_to_padded_dense_forward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..302a1d6c5d67ecf64049031f988ff502e733e3a2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_jagged_to_padded_dense_forward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _jagged_to_padded_dense_forward_cpu(const at::Tensor & values, at::TensorList offsets, at::IntArrayRef max_lengths, double padding_value=0.0); +TORCH_API at::Tensor _fbgemm_jagged_to_padded_dense_forward(const at::Tensor & values, at::TensorList offsets, at::IntArrayRef max_lengths, double padding_value=0.0); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2fd2f3131bbf8abf00646c51b096348be35fe58c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
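// A small usage sketch of the convention visible in the _foobar wrappers above
// (and repeated across these generated Function.h headers): the "_out" spelling
// takes `out` first and keeps the trailing defaults, the "_outf" spelling takes
// every argument explicitly with `out` last, and both forward to the same
// at::_ops::_foobar_out::call. Assumes a build where aten::_foobar (a test-only
// operator) has a kernel for the tensors involved; the tensors are placeholders.
#include <ATen/ops/_foobar.h>

void foobar_out_sketch(const at::Tensor& self, at::Tensor& out) {
  at::_foobar_out(out, self);                                                  // arg1/arg2/arg3 default to true
  at::_foobar_outf(self, /*arg1=*/true, /*arg2=*/false, /*arg3=*/true, out);   // fully explicit
}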
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype); +TORCH_API at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype); +TORCH_API at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_slow_conv2d_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_slow_conv2d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..38f1ec0786db03f1ee4afd70e99098c1613bbbca --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_slow_conv2d_backward.h @@ -0,0 +1,135 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias); +} +namespace symint { + template ::value>> + ::std::tuple _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias); + } +} + +// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) 
grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) { + return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias); +} +namespace symint { + template ::value>> + ::std::tuple _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) { + return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), grad_input, grad_weight, grad_bias); + } +} + +// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple _slow_conv2d_backward_symint_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias); +} +namespace symint { + template ::value>> + ::std::tuple _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) { + return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias); + } +} + +// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) 
grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple _slow_conv2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) { + return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias); +} +namespace symint { + template ::value>> + ::std::tuple _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) { + return at::_ops::_slow_conv2d_backward_grad_input::call(grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias); + } +} + +// aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) +inline ::std::tuple _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array output_mask) { + return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask); +} +namespace symint { + template ::value>> + ::std::tuple _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array output_mask) { + return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask); + } +} + +// aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) +inline ::std::tuple _slow_conv2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask) { + return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask); +} +namespace symint { + template ::value>> + ::std::tuple _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask) { + return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask); + } +} + +// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple _slow_conv2d_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array output_mask) { + return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple _slow_conv2d_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array output_mask) { + return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask, out0, out1, out2); + } +} + +// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, c10::fromIntArrayRefSlow(kernel_size), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), output_mask, out0, out1, out2); + } +} + +// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple _slow_conv2d_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask) { + return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple _slow_conv2d_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask) { + return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2); + } +} + +// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple _slow_conv2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2); +} +namespace symint { + template ::value>> + ::std::tuple _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_slow_conv2d_backward_output_mask_out::call(grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2); + } +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..69308dbcd5c5feba286cae33b831c2258ff5007a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
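// A sketch of the two overload families declared in _slow_conv2d_backward.h above:
// the at::IntArrayRef overloads wrap concrete sizes via c10::fromIntArrayRefSlow(...),
// while the *_symint overloads accept c10::SymIntArrayRef (symbolic sizes) directly;
// both funnel into the same at::_ops::_slow_conv2d_backward_output_mask::call.
// The input tensors are placeholders.
#include <ATen/ops/_slow_conv2d_backward.h>
#include <vector>

void slow_conv2d_backward_sketch(const at::Tensor& grad_output, const at::Tensor& self,
                                 const at::Tensor& weight) {
  // Concrete integer sizes.
  auto grads = at::_slow_conv2d_backward(grad_output, self, weight,
                                         /*kernel_size=*/{3, 3}, /*stride=*/{1, 1},
                                         /*padding=*/{1, 1}, /*output_mask=*/{{true, true, true}});
  // Symbolic sizes, e.g. when tracing with dynamic shapes.
  std::vector<c10::SymInt> three = {c10::SymInt(3), c10::SymInt(3)};
  auto grads_sym = at::_slow_conv2d_backward_symint(grad_output, self, weight,
                                                    three, three, /*padding=*/three,
                                                    {{true, true, true}});
}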
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _sparse_mm_reduce_impl { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_mm_reduce_impl") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::string_view reduce); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_check_tensor.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_check_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..6df54541a642a57baf95581a1f5613bec49c67c1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_test_check_tensor.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_test_check_tensor(Tensor self) -> Tensor +inline at::Tensor _test_check_tensor(const at::Tensor & self) { + return at::_ops::_test_check_tensor::call(self); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1bee5bc4c953e0197856c2168eccaad896f3ab9e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor _upsample_bilinear2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor _upsample_bilinear2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6b1ac0964407745ae5099f897a5f3d8acb50985e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_meta_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor _upsample_nearest_exact2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional scales_h, ::std::optional scales_w, at::Tensor & grad_input); +TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional scales_h, ::std::optional scales_w, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_wrapped_quantized_linear_prepacked_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_wrapped_quantized_linear_prepacked_native.h new file mode 100644 index 
0000000000000000000000000000000000000000..1fbe9cc1c71e17eb57f2247f19ff3dadcf3a25da --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_wrapped_quantized_linear_prepacked_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _wrapped_quantized_linear_prepacked(const at::Tensor & input, const at::Tensor & input_scale, const at::Tensor & input_zero_point, const at::Tensor & packed_weight, const at::Tensor & output_scale, const at::Tensor & output_zero_point, int64_t out_channel); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/argmin_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/argmin_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..28660f04db08f6056934268025b2547993edc3a6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/argmin_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor argmin(const at::Tensor & self, ::std::optional dim=::std::nullopt, bool keepdim=false); +TORCH_API at::Tensor & argmin_out(at::Tensor & out, const at::Tensor & self, ::std::optional dim=::std::nullopt, bool keepdim=false); +TORCH_API at::Tensor & argmin_outf(const at::Tensor & self, ::std::optional dim, bool keepdim, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asin_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asin_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..6d435a5747740f16f576dabcbe2b2f108e2c9c43 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asin_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_asin : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asinh_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asinh_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..5d9354b362b3aebd41277f97abf8dc090673a66a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asinh_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_asinh : public TensorIteratorBase { + + + void 
meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/avg_pool2d_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/avg_pool2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6be140b1cfa0c4f59a20eaaee58333d18f1ac4e8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/avg_pool2d_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_avg_pool2d_out_cpu : public at::meta::structured_avg_pool2d { +void impl(const at::Tensor & self, int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW, bool ceil_mode, bool count_include_pad, ::std::optional divisor_override, const at::Tensor & out); +}; +struct TORCH_API structured_avg_pool2d_out_cuda : public at::meta::structured_avg_pool2d { +void impl(const at::Tensor & self, int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW, bool ceil_mode, bool count_include_pad, ::std::optional divisor_override, const at::Tensor & out); +}; +TORCH_API at::Tensor mkldnn_avg_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, ::std::optional divisor_override=::std::nullopt); +TORCH_API at::Tensor & mkldnn_avg_pool2d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional divisor_override, at::Tensor & out); +TORCH_API at::Tensor avg_pool2d_quantized_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, ::std::optional divisor_override=::std::nullopt); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bincount.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bincount.h new file mode 100644 index 0000000000000000000000000000000000000000..d905daf3dfae8140263e0040ba616c734e1fa0b3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bincount.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor +inline at::Tensor bincount(const at::Tensor & self, const ::std::optional & weights={}, int64_t minlength=0) { + return at::_ops::bincount::call(self, weights, minlength); +} + +// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & bincount_out(at::Tensor & out, const at::Tensor & self, const ::std::optional & weights={}, int64_t minlength=0) { + return at::_ops::bincount_out::call(self, weights, minlength, out); +} +// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & bincount_outf(const at::Tensor & self, const ::std::optional & weights, int64_t minlength, at::Tensor & out) { + return at::_ops::bincount_out::call(self, weights, minlength, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/binomial_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/binomial_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2d9c6fcae13f4ac0cb4fa4d46cf09298bf6bcfa6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/binomial_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & binomial_out(const at::Tensor & count, const at::Tensor & prob, ::std::optional generator, at::Tensor & out); +TORCH_API at::Tensor _s_binomial_cpu(const at::Tensor & count, const at::Tensor & prob, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor _s_binomial_cuda(const at::Tensor & count, const at::Tensor & prob, ::std::optional generator=::std::nullopt); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_or_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_or_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c799ef5b6cf2a6c1a483e6ade2a4c9c0cae3845f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_or_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor bitwise_or(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_or_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/block_diag_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/block_diag_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e2733cb5d3ca2a8fbbe3f6f88a30261e78a11a0a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/block_diag_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor block_diag(at::TensorList tensors); +TORCH_API at::Tensor & block_diag_out(at::Tensor & out, at::TensorList tensors); +TORCH_API at::Tensor & block_diag_outf(at::TensorList tensors, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cauchy_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cauchy_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..46d9ba43b3435eea9fa0b7cfcc80227bfa5a7636 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cauchy_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & cauchy_(at::Tensor & self, double median=0, double sigma=1, ::std::optional generator=::std::nullopt); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/col2im_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/col2im_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bfc57361725f0270bcd6bcb1da8f79bc5de28280 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/col2im_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
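// A short sketch of how these per-dispatch-key headers (at::cpu::, at::cuda::,
// at::meta::, at::compositeexplicitautograd::, ...) are meant to be used: they
// expose the kernels registered for one backend so callers that already know the
// tensor's backend can skip the dispatcher. Assumes a default CPU float tensor;
// values are illustrative.
#include <ATen/ATen.h>
#include <ATen/ops/cauchy_cpu_dispatch.h>

void cauchy_cpu_sketch() {
  at::Tensor t = at::empty({8});                        // CPU float tensor by default
  at::cpu::cauchy_(t, /*median=*/0.0, /*sigma=*/1.0);   // direct call into the CPU registration
  t.cauchy_();                                          // the usual dispatched equivalent
}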
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor col2im_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & col2im_outf(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); +TORCH_API at::Tensor & col2im_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & col2im_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/column_stack_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/column_stack_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..00f00f91eaaae3c87ab018fb26c43efbb2e117ae --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/column_stack_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor column_stack(at::TensorList tensors); +TORCH_API at::Tensor & column_stack_out(at::Tensor & out, at::TensorList tensors); +TORCH_API at::Tensor & column_stack_outf(at::TensorList tensors, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conj_physical.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conj_physical.h new file mode 100644 index 0000000000000000000000000000000000000000..85eea274004e59a1202004e58d60719deb31f53f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conj_physical.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::conj_physical(Tensor self) -> Tensor +inline at::Tensor conj_physical(const at::Tensor & self) { + return at::_ops::conj_physical::call(self); +} + +// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::conj_physical_out::call(self, out); +} +// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::conj_physical_out::call(self, out); +} + +// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & conj_physical_(at::Tensor & self) { + return at::_ops::conj_physical_::call(self); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cos_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cos_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..91af25b478e5c42da940327473e247b708e93ba3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cos_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor cos(const at::Tensor & self); +TORCH_API at::Tensor & cos_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cosh.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cosh.h new file mode 100644 index 0000000000000000000000000000000000000000..227a63c68f86d71681fe1dcc0e3da6fcdce94cb5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cosh.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::cosh(Tensor self) -> Tensor +inline at::Tensor cosh(const at::Tensor & self) { + return at::_ops::cosh::call(self); +} + +// aten::cosh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & cosh_(at::Tensor & self) { + return at::_ops::cosh_::call(self); +} + +// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & cosh_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::cosh_out::call(self, out); +} +// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & cosh_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::cosh_out::call(self, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..06300d430b9ede5eadcc5129fcbe09d192865e8d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & cudnn_affine_grid_generator_backward_out(at::Tensor & out, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W); +TORCH_API at::Tensor & cudnn_affine_grid_generator_backward_outf(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumprod_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumprod_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..dcccc1dc22e3666242a8d1131d3eb9569ea73075 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumprod_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_cumprod : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, int64_t dim, ::std::optional dtype); +}; + +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diff_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diff_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..220aee166a3e5ad80732db4e1c2f5c81d029f1f3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/diff_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
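// [Editorial usage sketch -- not part of the generated header] A hedged illustration of the
// paired `*_out` / `*_outf` spellings declared above (e.g. at::cosh_out / at::cosh_outf):
// both forward to the same at::_ops::cosh_out::call, differing only in where `out` sits in
// the argument list. Assumes <ATen/ATen.h> is available; example_cosh_out_usage is a
// hypothetical helper name.
static inline void example_cosh_out_usage() {
  at::Tensor x = at::rand({4});          // sample input
  at::Tensor out = at::empty_like(x);    // preallocated destination
  at::cosh_out(out, x);                  // out-parameter-first spelling
  at::cosh_outf(x, out);                 // schema-order spelling; same underlying op
}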
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor diff(const at::Tensor & self, int64_t n=1, int64_t dim=-1, const ::std::optional & prepend={}, const ::std::optional & append={}); +TORCH_API at::Tensor & diff_out(at::Tensor & out, const at::Tensor & self, int64_t n=1, int64_t dim=-1, const ::std::optional & prepend={}, const ::std::optional & append={}); +TORCH_API at::Tensor & diff_outf(const at::Tensor & self, int64_t n, int64_t dim, const ::std::optional & prepend, const ::std::optional & append, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dstack_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dstack_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e5f4cedf9c1a38334a21d6740503fd0372476a8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dstack_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor dstack(at::TensorList tensors); +TORCH_API at::Tensor & dstack_out(at::Tensor & out, at::TensorList tensors); +TORCH_API at::Tensor & dstack_outf(at::TensorList tensors, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmax_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmax_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9b539c92c5ead61c9c46e99e6041436f7aa24eb8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmax_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
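// [Editorial usage sketch -- not part of the generated header] The diff(...) declarations
// above compute finite differences with defaults n=1 along the last dimension; the public
// at::diff wrapper shares this signature. Assumes <ATen/ATen.h>; example_diff_usage is a
// hypothetical helper name.
static inline void example_diff_usage() {
  at::Tensor x = at::arange(5);            // [0, 1, 2, 3, 4]
  at::Tensor d1 = at::diff(x);             // first difference  -> [1, 1, 1, 1]
  at::Tensor d2 = at::diff(x, /*n=*/2);    // second difference -> [0, 0, 0]
}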
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor fmax(const at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gt_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gt_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..101fbe9bb323615ed66a54d997308b43ea65e5f3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gt_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API gt_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gt") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API gt_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gt") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gt.Scalar(Tensor self, Scalar other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API gt_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gt") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API gt_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gt") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gt.Tensor(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API gt__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gt_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API gt__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::gt_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "gt_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardswish_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardswish_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..959fa22250bebcc516c7fface2d0e9218132fa12 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardswish_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & hardswish_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/heaviside_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/heaviside_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e980bb6084f9335285247d1f3e3ea7c3d6ae624 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/heaviside_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out); +TORCH_API at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/i0_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/i0_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9b3666463c2b8eb1353408e8564576b924e80204 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/i0_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
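// [Editorial usage sketch -- not part of the generated header] The aten::gt schemas above
// back the public comparison API: at::gt returns a new boolean tensor, while the gt_
// overloads mutate `self` in place. Assumes <ATen/ATen.h>; example_gt_usage is a
// hypothetical helper name.
static inline void example_gt_usage() {
  at::Tensor x = at::rand({3});
  at::Tensor y = at::rand({3});
  at::Tensor m1 = at::gt(x, 0.5);   // gt.Scalar: elementwise x > 0.5
  at::Tensor m2 = at::gt(x, y);     // gt.Tensor: elementwise x > y
  x.gt_(0.5);                       // gt_.Scalar: in-place, x becomes 0./1. values
}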
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API i0 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::i0") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "i0(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API i0_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::i0_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "i0_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API i0_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::i0") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/igamma_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/igamma_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..37990bb89853a1b5bdc96ded1decff382fee9187 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/igamma_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
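// [Editorial usage sketch -- not part of the generated header] Each struct above binds one
// operator overload to the dispatcher; the public wrappers (e.g. at::i0) simply forward to
// the struct's static call(). Assumes <ATen/ATen.h>; example_i0_call is a hypothetical
// helper name.
static inline void example_i0_call() {
  at::Tensor x = at::rand({3});
  at::Tensor a = at::i0(x);               // public functional wrapper
  at::Tensor b = at::_ops::i0::call(x);   // same schema invoked directly via the op struct
}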
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/imag_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/imag_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2dba96ca0637a7bf25855472134402236576b7fc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/imag_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API imag { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::imag") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "imag(Tensor(a) self) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/is_same_size.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/is_same_size.h new file mode 100644 index 0000000000000000000000000000000000000000..95a1933dc6ecfe544fc1d99ae237fd549849eae3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/is_same_size.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_same_size(Tensor self, Tensor other) -> bool +inline bool is_same_size(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::is_same_size::call(self, other); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/istft.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/istft.h new file mode 100644 index 0000000000000000000000000000000000000000..6c0e92908c5e08e846fe936b984449696058e399 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/istft.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? 
length=None, bool return_complex=False) -> Tensor +inline at::Tensor istft(const at::Tensor & self, int64_t n_fft, ::std::optional hop_length=::std::nullopt, ::std::optional win_length=::std::nullopt, const ::std::optional & window={}, bool center=true, bool normalized=false, ::std::optional onesided=::std::nullopt, ::std::optional length=::std::nullopt, bool return_complex=false) { + return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/kaiser_window_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/kaiser_window_native.h new file mode 100644 index 0000000000000000000000000000000000000000..51d2108114bcb0ecfb4c173cbce6e8534817fce1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/kaiser_window_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor kaiser_window(int64_t window_length, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & kaiser_window_out(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & kaiser_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out); +TORCH_API at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & kaiser_window_beta_out(int64_t window_length, bool periodic, double beta, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ldexp_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ldexp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b6c975440222677f559b00fdd9dcd576855663dc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ldexp_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
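// [Editorial usage sketch -- not part of the generated header] The kaiser_window kernels
// declared above back the public at::kaiser_window factory; the resulting 1-D window is the
// kind of tensor typically passed as the `window` argument of ops such as at::istft shown
// earlier. Assumes <ATen/ATen.h>; example_kaiser_window is a hypothetical helper name.
static inline void example_kaiser_window() {
  at::Tensor w1 = at::kaiser_window(64);                       // periodic window, default beta
  at::Tensor w2 = at::kaiser_window(64, /*periodic=*/false,
                                    /*beta=*/8.0);             // symmetric window, custom beta
}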
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API ldexp_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ldexp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ldexp.Tensor(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API ldexp_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ldexp_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API ldexp_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ldexp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_det_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_det_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3883b8916d6618bb3b19541a7825df61d64b7ad3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_det_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_det(const at::Tensor & A); +TORCH_API at::Tensor & linalg_det_out(const at::Tensor & A, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_ldl_solve.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_ldl_solve.h new file mode 100644 index 0000000000000000000000000000000000000000..96e6e3a304ddc4de65da71fce445aedd391e3d8d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_ldl_solve.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor +inline 
at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) { + return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian); +} + +// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) { + return at::_ops::linalg_ldl_solve_out::call(LD, pivots, B, hermitian, out); +} +// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) { + return at::_ops::linalg_ldl_solve_out::call(LD, pivots, B, hermitian, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1728013043da66604f808b08dc921f5ca5afc218 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_lu_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple linalg_lu(const at::Tensor & A, bool pivot=true); +TORCH_API ::std::tuple linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true); +TORCH_API ::std::tuple linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logical_and_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logical_and_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b9350db82ac5f51b197ef56f33e0f57f0c817e34 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logical_and_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
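// [Editorial usage sketch -- not part of the generated header] linalg_ldl_solve above
// consumes the (LD, pivots) pair produced by at::linalg_ldl_factor; a hedged sketch for a
// symmetric system A X = B, assuming <ATen/ATen.h>. example_ldl_solve is a hypothetical
// helper name.
static inline void example_ldl_solve() {
  at::Tensor A = at::rand({3, 3});
  A = A + A.mT();                                        // symmetrize A
  at::Tensor B = at::rand({3, 2});
  auto [LD, pivots] = at::linalg_ldl_factor(A);          // factorization consumed below
  at::Tensor X = at::linalg_ldl_solve(LD, pivots, B);    // hermitian defaults to false
}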
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API logical_and { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logical_and") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logical_and(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API logical_and_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logical_and_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API logical_and_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logical_and") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ea56b707a0ac0e9398f61c2b7ed8d815afa88f64 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
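// [Editorial usage sketch -- not part of the generated header] The three structs above map
// to the functional, in-place, and out= forms of aten::logical_and; the public surface is
// at::logical_and and Tensor::logical_and_. Assumes <ATen/ATen.h>; example_logical_and is a
// hypothetical helper name.
static inline void example_logical_and() {
  at::Tensor a = at::tensor({true, false, true});
  at::Tensor b = at::tensor({true, true, false});
  at::Tensor c = at::logical_and(a, b);   // new bool tensor {true, false, false}
  a.logical_and_(b);                      // in-place form mutates a
}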
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..5e06acdba42dc3e8a4b1f332548c913a52bfaffa --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool3d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); +} +// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & max_pool3d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::max_pool3d_with_indices_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); +} + +// aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor +inline at::Tensor max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool3d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mode_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mode_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fb5beecaf442434024b8b6b82b03336e68d96c80 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mode_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple mode_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple mode(const at::Tensor & self, int64_t dim=-1, bool keepdim=false); +TORCH_API ::std::tuple mode(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple mode_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/moveaxis.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/moveaxis.h new file mode 100644 index 0000000000000000000000000000000000000000..82fe27c47b1525ed80c8296e6d3f16e488574a86 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/moveaxis.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) +inline at::Tensor moveaxis(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) { + return at::_ops::moveaxis_intlist::call(self, source, destination); +} + +// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a) +inline at::Tensor moveaxis(const at::Tensor & self, int64_t source, int64_t destination) { + return at::_ops::moveaxis_int::call(self, source, destination); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_group_norm_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_group_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f76ab00bd5220a8b05eaedf3b4eb5c184460ef51 --- /dev/null +++ 
b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_group_norm_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple math_group_norm(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps); +TORCH_API ::std::tuple native_group_norm_out_symint(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +TORCH_API ::std::tuple native_group_norm(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss2d_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss2d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..86a8bb6b77a8940bc429f171bd18336601df1adb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss2d_backward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor nll_loss2d_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor & nll_loss2d_backward_out_cpu(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); +TORCH_API at::Tensor nll_loss2d_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor & nll_loss2d_backward_out_cuda(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..059ea6224926a94e74fa2e037025b005cc416c29 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor.h @@ -0,0 +1,67 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor +inline at::Tensor quantize_per_tensor(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType 
dtype) { + return at::_ops::quantize_per_tensor::call(self, scale, zero_point, dtype); +} + +// aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor +inline at::Tensor quantize_per_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensor_qparams::call(self, scale, zero_point, dtype); +} + +// aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] +inline ::std::vector quantize_per_tensor(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensors::call(tensors, scales, zero_points, dtype); +} + +// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantize_per_tensor_out(at::Tensor & out, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_out::call(self, scale, zero_point, dtype, out); +} +// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantize_per_tensor_outf(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::quantize_per_tensor_out::call(self, scale, zero_point, dtype, out); +} + +// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantize_per_tensor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensor_qparams_out::call(self, scale, zero_point, dtype, out); +} +// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & quantize_per_tensor_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::quantize_per_tensor_tensor_qparams_out::call(self, scale, zero_point, dtype, out); +} + +// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () +inline void quantize_per_tensor_out(at::TensorList out, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out); +} +// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () +inline void quantize_per_tensor_outf(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) { + return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/result_type.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/result_type.h new file mode 100644 index 0000000000000000000000000000000000000000..33426c56b0f7e50bf024f682f865f7d05c484d2c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/result_type.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType +inline at::ScalarType result_type(const at::Tensor & tensor, const at::Tensor & other) { + return at::_ops::result_type_Tensor::call(tensor, other); +} + +// aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType +inline at::ScalarType result_type(const at::Tensor & tensor, const at::Scalar & other) { + return at::_ops::result_type_Scalar::call(tensor, other); +} + +// aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType +inline at::ScalarType result_type(const at::Scalar & scalar, const at::Tensor & tensor) { + return at::_ops::result_type_Scalar_Tensor::call(scalar, tensor); +} + +// aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType +inline at::ScalarType result_type(const at::Scalar & scalar1, const at::Scalar & scalar2) { + return at::_ops::result_type_Scalar_Scalar::call(scalar1, scalar2); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scalar_tensor.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scalar_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..fd847ce3c0bdcfd617be8a353d6c0ee59a06fbd1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scalar_tensor.h @@ -0,0 +1,43 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor scalar_tensor(const at::Scalar & s, at::TensorOptions options={}) { + return at::_ops::scalar_tensor::call(s, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor scalar_tensor(const at::Scalar & s, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::scalar_tensor::call(s, dtype, layout, device, pin_memory); +} + +// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & scalar_tensor_out(at::Tensor & out, const at::Scalar & s) { + return at::_ops::scalar_tensor_out::call(s, out); +} +// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & scalar_tensor_outf(const at::Scalar & s, at::Tensor & out) { + return at::_ops::scalar_tensor_out::call(s, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0c464e0aa96934e63d4afcf3e162ecd92771e1d8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
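// [Editorial usage sketch -- not part of the generated header] scalar_tensor above has two
// spellings: a TensorOptions-gathered overload and one taking the unpacked optional
// dtype/layout/device/pin_memory fields; both forward to the same schema. Assumes
// <ATen/ATen.h>; example_scalar_tensor is a hypothetical helper name.
static inline void example_scalar_tensor() {
  at::Tensor a = at::scalar_tensor(3.14, at::dtype(at::kDouble));   // TensorOptions form
  at::Tensor b = at::scalar_tensor(3.14, at::kDouble,               // unpacked-optional form
                                   ::std::nullopt, ::std::nullopt, ::std::nullopt);
}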
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor slow_conv_transpose2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)); +TORCH_API at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out); +TORCH_API at::Tensor & slow_conv_transpose2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)); +TORCH_API at::Tensor & slow_conv_transpose2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softmax_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softmax_native.h new file mode 100644 index 0000000000000000000000000000000000000000..11a8be3c6036ce9aeba2792a6dcf26606916e5c6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softmax_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor softmax(const at::Tensor & self, int64_t dim, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & softmax_out(const at::Tensor & self, int64_t dim, ::std::optional dtype, at::Tensor & out); +TORCH_API at::Tensor softmax(const at::Tensor & self, at::Dimname dim, ::std::optional dtype=::std::nullopt); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_bessel_y1_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_bessel_y1_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a09ce1fa10b9a7870e515676a7ab890e68db972d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_bessel_y1_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_special_bessel_y1_out : public at::meta::structured_special_bessel_y1 { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_digamma_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_digamma_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..678cac4a92cdbc8641b5293a52b11c4c4211c2ec --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_digamma_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor special_digamma(const at::Tensor & self); +TORCH_API at::Tensor & special_digamma_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_digamma_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sub_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sub_native.h new file mode 100644 index 0000000000000000000000000000000000000000..577056bd84523b19e5621885928e12a59c170a33 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sub_native.h @@ -0,0 +1,31 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_sub_out : public at::meta::structured_sub_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, const at::Tensor & out); +}; +TORCH_API at::Tensor NestedTensor_sub_Tensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor sub_sparse(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & sub_out_sparse(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & sub_sparse_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor sub_zerotensor(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor sub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & sub_Scalar_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & sub_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sum_compositeimplicitautograd_dispatch.h 
b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sum_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5a449f4425c7e5aeac0079f2d89b8e0f6c34596e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sum_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor sum(const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & sum_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, ::std::optional dtype, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tanh_backward_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tanh_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e6753baba53931e77faee19f7a7f197229656b1c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tanh_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor tanh_backward(const at::Tensor & grad_output, const at::Tensor & output); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tensor.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..2f72b7ef02637983d35f5049bdb6d1bd613cd2bd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tensor.h @@ -0,0 +1,30 @@ +#pragma once +#include +#include + +namespace at { + +// These functions are defined in ATen/Utils.cpp. 
+#define TENSOR(T, S) \
+  TORCH_API Tensor tensor(ArrayRef<T> values, const TensorOptions& options); \
+  inline Tensor tensor( \
+      std::initializer_list<T> values, const TensorOptions& options) { \
+    return at::tensor(ArrayRef<T>(values), options); \
+  } \
+  inline Tensor tensor(T value, const TensorOptions& options) { \
+    return at::tensor(ArrayRef<T>(value), options); \
+  } \
+  inline Tensor tensor(ArrayRef<T> values) { \
+    return at::tensor(std::move(values), at::dtype(k##S)); \
+  } \
+  inline Tensor tensor(std::initializer_list<T> values) { \
+    return at::tensor(ArrayRef<T>(values)); \
+  } \
+  inline Tensor tensor(T value) { \
+    return at::tensor(ArrayRef<T>(value)); \
+  }
+AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
+AT_FORALL_COMPLEX_TYPES(TENSOR)
+#undef TENSOR
+
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_padded_tensor_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_padded_tensor_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f4d64eb9b408cfa2c9480ab2bab940f5fd1ed1c8
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_padded_tensor_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API to_padded_tensor {
+  using schema = at::Tensor (const at::Tensor &, double, at::OptionalSymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::to_padded_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size);
+};
+
+struct TORCH_API to_padded_tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, double, at::OptionalSymIntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::to_padded_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_bsr_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_bsr_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..854371b04f6edd84a2106bddbccfda4c86836315
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_bsr_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor to_sparse_bsr(const at::Tensor & self, at::IntArrayRef blocksize, ::std::optional<int64_t> dense_dim=::std::nullopt);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_csr_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_csr_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8584e9beb8632230facb2b31ae9b1352014d9fb2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/to_sparse_csr_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor to_sparse_csr(const at::Tensor & self, ::std::optional<int64_t> dense_dim=::std::nullopt);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trace_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trace_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5160db093adeedbdbbe7f6fd6e76d24c223a19a1
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trace_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API trace {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trace")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trace(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API trace_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trace")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unfold_copy_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unfold_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c2c88cc905fb12a37e99be0c1fbb05fcdfa662e5
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unfold_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & unfold_copy_out(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out);
+TORCH_API at::Tensor unfold_copy(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_bicubic2d_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_bicubic2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b242af63eaae01fc3272a3eb66cc0acf5c85b532
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_bicubic2d_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor upsample_bicubic2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, ::std::optional<at::ArrayRef<double>> scale_factors);
+struct TORCH_API structured_upsample_bicubic2d_out_cpu : public at::meta::structured_upsample_bicubic2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & out);
+};
+struct TORCH_API structured_upsample_bicubic2d_out_cuda : public at::meta::structured_upsample_bicubic2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce9dcc94c9058cf57a308b913862b376611155e5
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
+inline at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
+  }
+}
+
+// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
+inline at::Tensor value_selecting_reduction_backward_symint(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
+  }
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/values_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/values_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..053c463044e25f5405d6639686127f43531cef4e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/values_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API values {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::values")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "values(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/vander_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/vander_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f27e327fe80e869547790be0f7b14717736f485a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/vander_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor vander(const at::Tensor & x, ::std::optional<int64_t> N=::std::nullopt, bool increasing=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
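
The headers in this diff only declare the generated C++ entry points; user code normally reaches them through the public ATen API in Functions.h. As a minimal illustrative sketch (not part of the diff itself; it assumes only the standard ATen/ATen.h umbrella header and the operators declared above), the tensor.h factory overloads and the trace/vander kernels could be exercised like this:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  // at::tensor overloads are generated by the TENSOR macro in ATen/ops/tensor.h.
  at::Tensor m = at::tensor({1.0, 2.0, 3.0, 4.0}, at::dtype(at::kDouble)).reshape({2, 2});

  // aten::trace(Tensor self) -> Tensor, declared in trace_ops.h.
  at::Tensor tr = at::trace(m);  // 1.0 + 4.0 = 5.0

  // aten::vander, whose CompositeImplicitAutograd kernel is declared in
  // vander_compositeimplicitautograd_dispatch.h.
  at::Tensor v = at::vander(at::tensor({1.0, 2.0, 3.0}), /*N=*/3, /*increasing=*/false);

  std::cout << tr.item<double>() << " " << v.size(0) << "x" << v.size(1) << std::endl;
  return 0;
}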