diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1164daf58526c797f3b16f9646130512c1107659
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor & _adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor & _adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size);
+TORCH_API at::Tensor & _adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_assert_scalar.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_assert_scalar.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6f86ffd46d76e6241e044191e528c1f25521851
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_assert_scalar.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_assert_scalar_ops.h>
+
+namespace at {
+
+
+// aten::_assert_scalar(Scalar self, str assert_msg) -> ()
+inline void _assert_scalar(const at::Scalar & self, c10::string_view assert_msg) {
+    return at::_ops::_assert_scalar::call(self, assert_msg);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Short_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Short_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..96ca0518fd1a1121d805a374e59912ee84ad75f2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Short_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _cast_Short(const at::Tensor & self, bool non_blocking=false);
+} // namespace native
+} // namespace at
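These `*_dispatch.h` headers expose one namespace per dispatch key (`at::compositeexplicitautograd`, `at::cpu`, `at::cuda`, ...), and each out-variant comes in two spellings: `_out` takes the destination tensor first, `_outf` takes it last, in schema order. A minimal sketch of calling the two spellings through the public `at::` entry points (my example, not part of the diff; shapes are illustrative):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({1, 3, 8, 8});
  at::Tensor out = at::empty({1, 3, 4, 4});
  // "_out" spelling: the destination leads the argument list.
  at::_adaptive_avg_pool2d_out(out, x, {4, 4});
  // "_outf" spelling: same kernel, destination trails (schema order).
  at::_adaptive_avg_pool2d_outf(x, {4, 4}, out);
  return 0;
}
```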
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc9099cb356e4911015355a8b6b7ed3dbe67e0b8
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cholesky_solve_helper_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_flash_attention_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_flash_attention_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca59a936e57aac764eefc6a64ad026f46c2edbde
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_flash_attention_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale=::std::nullopt, ::std::optional<int64_t> window_size_left=::std::nullopt, ::std::optional<int64_t> window_size_right=::std::nullopt);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foobar_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foobar_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc90a22df20302e07b540d5a0561b4a5aa3c8c6a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foobar_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _foobar(const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_acos_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_acos_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d04807b6c6720a46234e2e2da8c965fd79a6acd
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_acos_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_acos_slow(at::TensorList self);
+TORCH_API void _foreach_acos_out(at::TensorList self, at::TensorList out);
+TORCH_API void foreach_tensor_acos_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_acos_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_acos_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_lerp.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_lerp.h
new file mode 100644
index 0000000000000000000000000000000000000000..7115772b970211ea64838f7f6a00b177d72f9902
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_lerp.h
@@ -0,0 +1,63 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_lerp_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
+    return at::_ops::_foreach_lerp_List::call(self, tensors1, weights);
+}
+
+// aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
+inline void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
+    return at::_ops::_foreach_lerp__List::call(self, tensors1, weights);
+}
+
+// aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
+    return at::_ops::_foreach_lerp_Scalar::call(self, tensors1, weight);
+}
+
+// aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
+inline void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
+    return at::_ops::_foreach_lerp__Scalar::call(self, tensors1, weight);
+}
+
+// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
+inline void _foreach_lerp_out(at::TensorList out, at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
+    return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out);
+}
+// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
+inline void _foreach_lerp_outf(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
+    return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out);
+}
+
+// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
+inline void _foreach_lerp_out(at::TensorList out, at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
+    return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out);
+}
+// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
+inline void _foreach_lerp_outf(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
+    return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out);
+}
+
+}
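For reference, the `_foreach_lerp` family above is the fused, multi-tensor counterpart of `at::lerp`: one call interpolates a whole list of tensors. A hedged usage sketch (my example, assuming a recent libtorch; not taken from the diff):

```cpp
#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<at::Tensor> starts = {at::zeros({2}), at::zeros({3})};
  std::vector<at::Tensor> ends   = {at::ones({2}),  at::ones({3})};
  // List overload: elementwise tensor weights, one per list entry.
  std::vector<at::Tensor> w = {at::full({2}, 0.5), at::full({3}, 0.5)};
  std::vector<at::Tensor> mid1 = at::_foreach_lerp(starts, ends, w);
  // Scalar overload: a single weight shared by every pair.
  std::vector<at::Tensor> mid2 = at::_foreach_lerp(starts, ends, 0.5);
  // In-place variant mutates `starts` and returns nothing.
  at::_foreach_lerp_(starts, ends, 0.5);
  return 0;
}
```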
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_adam.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_adam.h
new file mode 100644
index 0000000000000000000000000000000000000000..99142cea85f1233e578b62770046d17fc028777c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_adam.h
@@ -0,0 +1,63 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_fused_adam_ops.h>
+
+namespace at {
+
+
+// aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
+inline void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+// aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()
+inline void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
+    return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+
+// aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
+inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()
+inline void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale, const ::std::optional<at::Tensor> & found_inf, at::TensorList out) {
+    return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
+}
+
+// aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)
+inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional<at::Tensor> & grad_scale={}, const ::std::optional<at::Tensor> & found_inf={}) {
+    return at::_ops::_fused_adam_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
+}
+
+}
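The `_fused_adam_` entry point above is the low-level kernel behind the fused Adam optimizer: all parameter and state lists must be the same length, and the schema's keyword-only hyperparameters arrive as plain positional arguments in C++. A rough sketch of a single in-place step (my illustration only; state handling is simplified and not validated against a particular build):

```cpp
#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<at::Tensor> params      = {at::randn({4})};
  std::vector<at::Tensor> grads       = {at::randn({4})};
  std::vector<at::Tensor> exp_avgs    = {at::zeros({4})};
  std::vector<at::Tensor> exp_avg_sqs = {at::zeros({4})};
  std::vector<at::Tensor> max_exp_avg_sqs;               // unused when amsgrad=false
  std::vector<at::Tensor> state_steps = {at::zeros({})}; // float step counter per param
  // One fused Adam update, mutating params and state in place.
  at::_fused_adam_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps,
                   /*lr=*/1e-3, /*beta1=*/0.9, /*beta2=*/0.999,
                   /*weight_decay=*/0.0, /*eps=*/1e-8,
                   /*amsgrad=*/false, /*maximize=*/false);
  return 0;
}
```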
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_linalg_slogdet_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_linalg_slogdet_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..cbb7035b8ec58ad2122d66397e789d4499a95bc7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_linalg_slogdet_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured__linalg_slogdet : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & A);
+};
+
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_linalg_svd_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_linalg_svd_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fdddb423693192eaab72354b6ace4368db1c23ff
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_linalg_svd_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd(const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, ::std::optional<c10::string_view> driver=::std::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, ::std::optional<c10::string_view> driver=::std::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_outf(const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_logcumsumexp.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_logcumsumexp.h
new file mode 100644
index 0000000000000000000000000000000000000000..fc686560eb59c41003c558f697c73589ab1c49a7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_logcumsumexp.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_logcumsumexp_ops.h>
+
+namespace at {
+
+
+// aten::_logcumsumexp(Tensor self, int dim) -> Tensor
+inline at::Tensor _logcumsumexp(const at::Tensor & self, int64_t dim) {
+    return at::_ops::_logcumsumexp::call(self, dim);
+}
+
+// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
+    return at::_ops::_logcumsumexp_out::call(self, dim, out);
+}
+// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
+    return at::_ops::_logcumsumexp_out::call(self, dim, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0c4f59a5c39125b7832c94f078e16ee471356511
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p=0.0, bool is_causal=false, bool return_debug_mask=false, ::std::optional<double> scale=::std::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f146bbd89d75fcd3356b64b8c3aa601fef6f3498
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _sparse_log_softmax_backward_data_out(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor log_softmax_backward_sparse_cpu(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self);
+TORCH_API at::Tensor log_softmax_backward_sparse_cuda(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b600d5838db8608a6caaef3a2aced8d044481ed
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_cuda(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias={}, const ::std::optional<at::Tensor> & hidden_bias={});
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/alias_copy_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/alias_copy_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4b2b8e738af1639b54020aa79537b872931fab5e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/alias_copy_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor alias_copy(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/align_as_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/align_as_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2013ec008cbd6b265d71ebbe096c64cc84a415a1
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/align_as_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API align_as {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::align_as")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "align_as(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
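`*_ops.h` headers like `align_as_ops.h` declare one struct per overload, each carrying the schema string plus two unboxed entry points: `call` starts dispatch from the top of the dispatcher, while `redispatch` resumes below the dispatch keys already consumed (which is how autograd and other wrapper kernels forward to the next kernel in line). A small sketch of the `call` path (illustrative, not from the diff):

```cpp
#include <ATen/core/Tensor.h>
#include <ATen/ops/align_as_ops.h>

// Equivalent to calling at::Tensor::align_as: the dispatcher resolves the
// kernel registered for "aten::align_as" from the inputs' dispatch keys.
at::Tensor align_like(const at::Tensor & self, const at::Tensor & other) {
  return at::_ops::align_as::call(self, other);
}
```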
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/amin_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/amin_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5ac7db91e25df7ee4eae5d59a12f50249715d4f9
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/amin_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor amin(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/aminmax_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/aminmax_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..55e4eed05ca301922083b136743b506cad0abc05
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/aminmax_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_aminmax : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim);
+};
+
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/avg_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/avg_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2ca4df39461d08b1800e45d2a959ba27bea91a7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/avg_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, ::std::optional<int64_t> divisor_override);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bincount_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bincount_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a40cfd2fc09786782b245bf39f1e67df888213a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bincount_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor bincount(const at::Tensor & self, const ::std::optional<at::Tensor> & weights={}, int64_t minlength=0);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ceil_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ceil_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cb65fcf47b89f6cf6be6c1127cc0c5e3adc8c9fd
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ceil_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API ceil {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ceil")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ceil(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API ceil_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ceil_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ceil_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API ceil_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ceil")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/chain_matmul_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/chain_matmul_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9354c948a10f649e7456946726e343bff5a644b4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/chain_matmul_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API chain_matmul {
+  using schema = at::Tensor (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::chain_matmul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "chain_matmul(Tensor[] matrices) -> Tensor")
+  static at::Tensor call(at::TensorList matrices);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices);
+};
+
+struct TORCH_API chain_matmul_out {
+  using schema = at::Tensor & (at::TensorList, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::chain_matmul")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(at::TensorList matrices, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/constant_pad_nd_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/constant_pad_nd_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3aa45802c9e6dad3c7611df2d44375fcd4f37a31
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/constant_pad_nd_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0);
+TORCH_API at::Tensor constant_pad_nd_symint(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0);
+TORCH_API at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0);
+TORCH_API at::Tensor & constant_pad_nd_outf(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & constant_pad_nd_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0);
+TORCH_API at::Tensor & constant_pad_nd_symint_outf(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b8e96219f08395a7786a4eb1350672d59b58fffe
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor conv_transpose1d(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1);
+TORCH_API at::Tensor conv_transpose1d_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1));
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_is_acceptable.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_is_acceptable.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9bcc9868822a7da817d9f457192b5070cd79d17
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_is_acceptable.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cudnn_is_acceptable_ops.h>
+
+namespace at {
+
+
+// aten::cudnn_is_acceptable(Tensor self) -> bool
+inline bool cudnn_is_acceptable(const at::Tensor & self) {
+    return at::_ops::cudnn_is_acceptable::call(self);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_hfft.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_hfft.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c5adad3986d336bcf0167eebfabc2d78ffb0efc
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_hfft.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fft_hfft_ops.h>
+
+namespace at {
+
+
+// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+inline at::Tensor fft_hfft(const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_hfft::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor fft_hfft(const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_hfft::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm);
+  }
+}
+
+// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+inline at::Tensor fft_hfft_symint(const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_hfft::call(self, n, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor fft_hfft(const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_hfft::call(self, n, dim, norm);
+  }
+}
+
+// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_hfft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, ::std::optional<int64_t> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_hfft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_hfft_outf(const at::Tensor & self, ::std::optional<int64_t> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_hfft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_hfft_outf(const at::Tensor & self, ::std::optional<int64_t> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_hfft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_hfft_symint_out(at::Tensor & out, const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_hfft_out::call(self, n, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt) {
+    return at::_ops::fft_hfft_out::call(self, n, dim, norm, out);
+  }
+}
+
+// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_hfft_symint_outf(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_hfft_out::call(self, n, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & fft_hfft_outf(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_hfft_out::call(self, n, dim, norm, out);
+  }
+}
+
+}
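`fft_hfft.h` shows the SymInt pattern used across these headers: every signature with a `SymInt?` argument is emitted twice, once taking `int64_t` (wrapping the value into a `c10::SymInt` before dispatch) and once taking `c10::SymInt` directly, with `at::symint::` template aliases so generic code can pick either flavor by type parameter. A sketch of both call styles (my example; assumes a libtorch build with spectral ops):

```cpp
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::randn({8});
  // Concrete-int caller: n is wrapped into a c10::SymInt internally.
  at::Tensor a = at::symint::fft_hfft<int64_t>(t, 16);
  // SymInt caller: n flows through unchanged (relevant under symbolic tracing).
  at::Tensor b = at::symint::fft_hfft<c10::SymInt>(t, c10::SymInt(16));
  return 0;
}
```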
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0); +TORCH_API at::Tensor constant_pad_nd_symint(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0); +TORCH_API at::Tensor & constant_pad_nd_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0); +TORCH_API at::Tensor & constant_pad_nd_outf(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & constant_pad_nd_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value=0); +TORCH_API at::Tensor & constant_pad_nd_symint_outf(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b8e96219f08395a7786a4eb1350672d59b58fffe --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_transpose1d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor conv_transpose1d(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1); +TORCH_API at::Tensor conv_transpose1d_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1)); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_is_acceptable.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_is_acceptable.h new file mode 100644 index 0000000000000000000000000000000000000000..b9bcc9868822a7da817d9f457192b5070cd79d17 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_is_acceptable.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::cudnn_is_acceptable(Tensor self) -> bool +inline bool cudnn_is_acceptable(const at::Tensor & self) { + return at::_ops::cudnn_is_acceptable::call(self); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_hfft.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_hfft.h new file mode 100644 index 0000000000000000000000000000000000000000..6c5adad3986d336bcf0167eebfabc2d78ffb0efc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_hfft.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +inline at::Tensor fft_hfft(const at::Tensor & self, ::std::optional n=::std::nullopt, int64_t dim=-1, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_hfft::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_hfft(const at::Tensor & self, ::std::optional n=::std::nullopt, int64_t dim=-1, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_hfft::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm); + } +} + +// aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +inline at::Tensor fft_hfft_symint(const at::Tensor & self, ::std::optional n=::std::nullopt, int64_t dim=-1, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_hfft::call(self, n, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_hfft(const at::Tensor & self, ::std::optional n=::std::nullopt, int64_t dim=-1, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_hfft::call(self, n, dim, norm); + } +} + +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, ::std::optional n=::std::nullopt, int64_t dim=-1, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_hfft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, ::std::optional n=::std::nullopt, int64_t dim=-1, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_hfft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out); + } +} + +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_hfft_outf(const at::Tensor & self, ::std::optional n, int64_t dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_hfft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_hfft_outf(const at::Tensor & self, ::std::optional n, int64_t dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_hfft_out::call(self, n.has_value() ? ::std::make_optional(c10::SymInt(*n)) : ::std::nullopt, dim, norm, out); + } +} + +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_hfft_symint_out(at::Tensor & out, const at::Tensor & self, ::std::optional n=::std::nullopt, int64_t dim=-1, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_hfft_out::call(self, n, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, ::std::optional n=::std::nullopt, int64_t dim=-1, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_hfft_out::call(self, n, dim, norm, out); + } +} + +// aten::fft_hfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_hfft_symint_outf(const at::Tensor & self, ::std::optional n, int64_t dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_hfft_out::call(self, n, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_hfft_outf(const at::Tensor & self, ::std::optional n, int64_t dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_hfft_out::call(self, n, dim, norm, out); + } +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_ihfft2_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_ihfft2_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..00045ce2684ac68b8c45807f961c8b06523ad1c3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_ihfft2_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API fft_ihfft2 { + using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_ihfft2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm); +}; + +struct TORCH_API fft_ihfft2_out { + using schema = const at::Tensor & (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, ::std::optional, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_ihfft2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)") + static const at::Tensor & call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, const at::Tensor & out); + static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, const at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_rfftn_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_rfftn_native.h new file mode 100644 index 0000000000000000000000000000000000000000..efcd4c22dfa96b2189698a583e111171e8da5be0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_rfftn_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_rfftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt); +TORCH_API at::Tensor & fft_rfftn_symint_out(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/float_power_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/float_power_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..637e920813d050358e6ab607ee9c6b0021eb8528 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/float_power_compositeimplicitautograd_dispatch.h @@ -0,0 +1,33 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include 
+#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor float_power(const at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor & float_power_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out); +TORCH_API at::Tensor & float_power_(at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor float_power(const at::Scalar & self, const at::Tensor & exponent); +TORCH_API at::Tensor & float_power_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent); +TORCH_API at::Tensor & float_power_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out); +TORCH_API at::Tensor float_power(const at::Tensor & self, const at::Scalar & exponent); +TORCH_API at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent); +TORCH_API at::Tensor & float_power_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out); +TORCH_API at::Tensor & float_power_(at::Tensor & self, const at::Scalar & exponent); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/floor_divide_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/floor_divide_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..30398ac0dba80eb712353f8da920901ca2a735cc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/floor_divide_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API floor_divide { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API floor_divide__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API floor_divide_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API floor_divide_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide.Scalar(Tensor self, Scalar other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API floor_divide__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API floor_divide_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f0e4c91614cb3efd3276c809bf44f3cd17dca6fd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, ::std::optional generator=::std::nullopt); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/glu_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/glu_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..100902cef992de25d43b9c97379b417ca54bdb2b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/glu_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API glu_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::glu_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) 
grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input); +}; + +struct TORCH_API glu_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::glu_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gru_cell_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gru_cell_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4384a49b362c3a525b8421471dac1bf8626512a0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gru_cell_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional & b_ih={}, const ::std::optional & b_hh={}); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4fbd4a144d95ad8c8fe8aa7b9fc496df7dde1b1e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardsigmoid_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
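// NB: illustrative usage sketch, not generated code. The _ops structs above (e.g.
// glu_backward) expose the raw schema entry points via ::call / ::redispatch; most
// C++ callers go through the public at:: wrappers instead. Assuming <ATen/ATen.h>
// and a built libtorch:
//   at::Tensor x  = at::randn({4, 6});
//   at::Tensor y  = at::glu(x, /*dim=*/1);
//   at::Tensor gx = at::glu_backward(at::ones_like(y), x, /*dim=*/1);
// at::glu_backward(...) bottoms out in at::_ops::glu_backward::call(...).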
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hardsigmoid(const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardtanh_backward_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardtanh_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..46aae89384310e93a51ad04c0626b6c782ad745c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardtanh_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_reduce_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_reduce_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3a00a88d2ae924fec00dff4a255dd40aa4fe1912 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_reduce_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
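// NB: illustrative usage sketch, not generated code. The paired _out/_outf
// declarations above are the same kernel with the destination tensor first
// (_out) or last (_outf, matching the schema's argument order). Assuming
// <ATen/ATen.h>:
//   at::Tensor go = at::randn({8}), self = at::randn({8});
//   at::Tensor gi = at::empty_like(self);
//   at::hardtanh_backward_out(gi, go, self, /*min_val=*/-1.0, /*max_val=*/1.0);
//   at::hardtanh_backward_outf(go, self, -1.0, 1.0, gi);  // same op, schema order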
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); +TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isclose_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isclose_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f4ffcff61bb1758edbe9402f242f8070e51967eb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isclose_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API isclose { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, double, double, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isclose") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isposinf_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isposinf_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5e8ad855dda3d589b9df3ec6d86da5d65220b07c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isposinf_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
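// NB: illustrative usage sketch, not generated code. The isclose schema above
// mirrors torch.isclose; rtol/atol are plain doubles in the C++ API. Assuming
// <ATen/ATen.h>:
//   at::Tensor a = at::randn({3});
//   at::Tensor b = a + 1e-9;
//   at::Tensor mask = at::isclose(a, b, /*rtol=*/1e-05, /*atol=*/1e-08, /*equal_nan=*/false);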
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor isposinf(const at::Tensor & self); +TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/kthvalue_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/kthvalue_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b8d133a1995d3b0f9cb4c8cc6f141983936023c2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/kthvalue_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out_cpu(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out_cuda(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..99b0f6716cedbd169834f261cafd4382d460d38b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/le_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
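// NB: illustrative usage sketch, not generated code. kthvalue returns a
// (values, indices) tuple and k is 1-based along dim (k=1 is the smallest).
// Assuming <ATen/ATen.h> and C++17:
//   at::Tensor x = at::randn({3, 5});
//   auto [values, indices] = at::kthvalue(x, /*k=*/2, /*dim=*/1, /*keepdim=*/false);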
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor le(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor le(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lerp.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lerp.h new file mode 100644 index 0000000000000000000000000000000000000000..0f36e9f3fa99ca96eb93b0829fdc702beb80263c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lerp.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { + return at::_ops::lerp_Scalar_out::call(self, end, weight, out); +} +// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) { + return at::_ops::lerp_Scalar_out::call(self, end, weight, out); +} + +// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lerp_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) { + return at::_ops::lerp_Tensor_out::call(self, end, weight, out); +} +// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & lerp_outf(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) { + return at::_ops::lerp_Tensor_out::call(self, end, weight, out); +} + +// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor +inline at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { + return at::_ops::lerp_Scalar::call(self, end, weight); +} + +// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor +inline at::Tensor lerp(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) { + return at::_ops::lerp_Tensor::call(self, end, weight); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_cond.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_cond.h new file mode 100644 index 0000000000000000000000000000000000000000..fa89ed9ac98ad53e8cff4b5415dd7f9060f3d3dc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_cond.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_cond(Tensor self, Scalar? 
p=None) -> Tensor +inline at::Tensor linalg_cond(const at::Tensor & self, const ::std::optional<at::Scalar> & p=::std::nullopt) { + return at::_ops::linalg_cond::call(self, p); +} + +// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, const ::std::optional<at::Scalar> & p=::std::nullopt) { + return at::_ops::linalg_cond_out::call(self, p, out); +} +// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cond_outf(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::Tensor & out) { + return at::_ops::linalg_cond_out::call(self, p, out); +} + +// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor +inline at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p) { + return at::_ops::linalg_cond_p_str::call(self, p); +} + +// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, c10::string_view p) { + return at::_ops::linalg_cond_p_str_out::call(self, p, out); +} +// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cond_outf(const at::Tensor & self, c10::string_view p, at::Tensor & out) { + return at::_ops::linalg_cond_p_str_out::call(self, p, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c77e5128b6e93f11aed1874a699ded6b6198bfb5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_inv_ex_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
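// NB: illustrative usage sketch, not generated code. linalg_cond has a
// Scalar? overload (p=None computes the 2-norm condition number) and a
// string overload for "fro"/"nuc". Assuming <ATen/ATen.h>:
//   at::Tensor A  = at::randn({3, 3});
//   at::Tensor c2 = at::linalg_cond(A);          // Scalar? overload, p=None
//   at::Tensor cf = at::linalg_cond(A, "fro");   // p_str overload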
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex(const at::Tensor & A, bool check_errors=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3dcbd8ee55d501448ed804d5925eaf1ad56f5732 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_qr(const at::Tensor & A, c10::string_view mode="reduced"); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_svd_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_svd_native.h new file mode 100644 index 0000000000000000000000000000000000000000..864d9e99a636faeaa854cec6e4fabef9a60b0bb0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_svd_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd(const at::Tensor & A, bool full_matrices=true, ::std::optional<c10::string_view> driver=::std::nullopt); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_out(const at::Tensor & A, bool full_matrices, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..2851ca3d6a5a59d1df769f65f85cccbd6b6f18d7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_vector_norm : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype); +}; + +} // namespace native
} // namespace at diff --git
a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..322dffc2fbb5f961743483b844984c9018ba91ef --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim=false); +TORCH_API at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false); +TORCH_API at::Tensor & logsumexp_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool1d_with_indices.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool1d_with_indices.h new file mode 100644 index 0000000000000000000000000000000000000000..92a1906f00e7711ad3d0e447c6e72bfa059782b7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool1d_with_indices.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) +inline ::std::tuple max_pool1d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool1d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c652ea8a4776691a1287fa6eed1afa532e752dd5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
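// NB: illustrative usage sketch, not generated code. max_pool1d_with_indices
// returns both the pooled values and the argmax indices; stride=[] defaults
// to kernel_size. Assuming <ATen/ATen.h> and C++17:
//   at::Tensor x = at::randn({1, 2, 10});
//   auto [pooled, idx] = at::max_pool1d_with_indices(x, /*kernel_size=*/{2});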
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API max_pool2d_with_indices_out { + using schema = ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::max_pool2d_with_indices") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices); +}; + +struct TORCH_API max_pool2d_with_indices { + using schema = ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::max_pool2d_with_indices") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_convolution_relu_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_convolution_relu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b184b8b9ad9aefb60eee4aa5b972f735f0f74ab1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_convolution_relu_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
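// NB: illustrative usage sketch, not generated code. The .out overload above
// writes into caller-provided out/indices tensors, while the functional
// overload allocates them. Assuming <ATen/ATen.h> and C++17:
//   at::Tensor x = at::randn({1, 3, 8, 8});
//   auto [out, idx] = at::max_pool2d_with_indices(x, /*kernel_size=*/{2, 2});
//   at::Tensor o = at::empty({0}), i = at::empty({0}, at::kLong);
//   at::max_pool2d_with_indices_out(o, i, x, {2, 2});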
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API miopen_convolution_relu { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_convolution_relu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..de89acb83954b0d0b10543c2266f0ff5087396fa --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor +inline at::Tensor mkldnn_adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output, self); +} + +// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::call(grad_output, self, out); +} +// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & mkldnn_adaptive_avg_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::call(grad_output, self, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/narrow_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/narrow_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7bb2ec0e46bd8a918454306952f85352293fbde7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/narrow_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor narrow_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); +TORCH_API at::Tensor narrow_nested_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); +TORCH_API at::Tensor narrow_tensor_symint(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_batch_norm_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_batch_norm_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..f182453e058b2219e026fa6ddf4891040b2f2a59 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_batch_norm_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +inline ::std::tuple native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_invstd, bool train, double eps, ::std::array output_mask) { + return at::_ops::native_batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); +} + +// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple native_batch_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_invstd, bool train, double eps, ::std::array output_mask) { + return at::_ops::native_batch_norm_backward_out::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2); +} +// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple native_batch_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_invstd, bool train, double eps, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::native_batch_norm_backward_out::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_norm_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_norm_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ccaaef6fe29663a116431cc160ebe26f3a4ba12a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_norm_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
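// NB: illustrative usage sketch, not generated code. In the batch-norm
// backward above, output_mask (bool[3]) selects which of grad_input,
// grad_weight, grad_bias are materialized. Assuming <ATen/ATen.h>, C++17,
// and caller-provided tensors x, w, b, rm, rv plus an upstream gradient gy
// (all hypothetical names):
//   auto [y, save_mean, save_invstd] =
//       at::native_batch_norm(x, w, b, rm, rv, /*training=*/true, /*momentum=*/0.1, /*eps=*/1e-5);
//   auto grads = at::native_batch_norm_backward(gy, x, w, rm, rv, save_mean, save_invstd,
//       /*train=*/true, /*eps=*/1e-5, /*output_mask=*/{true, true, true});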
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & native_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & p=2); +TORCH_API at::Tensor & native_norm_outf(const at::Tensor & self, const at::Scalar & p, at::Tensor & out); +TORCH_API at::Tensor & native_norm_out(at::Tensor & out, const at::Tensor & self, const ::std::optional & p, at::IntArrayRef dim, bool keepdim, ::std::optional dtype); +TORCH_API at::Tensor & native_norm_outf(const at::Tensor & self, const ::std::optional & p, at::IntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss2d_forward_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss2d_forward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f11b564b416abb3fb6cdb0bd70a1bfb021b7a9e9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss2d_forward_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index); +TORCH_API ::std::tuple nll_loss2d_forward_symint(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index); +TORCH_API ::std::tuple nll_loss2d_forward_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index); +TORCH_API ::std::tuple nll_loss2d_forward_outf(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight); +TORCH_API ::std::tuple nll_loss2d_forward_symint_out(at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index); +TORCH_API ::std::tuple nll_loss2d_forward_symint_outf(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index, at::Tensor & output, at::Tensor & total_weight); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_forward_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_forward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..bab29ec78e3d9cda8536d90b18bda0981aa5b7fd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_forward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_nll_loss_forward : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, const at::Tensor & target, at::OptionalTensorRef weight, int64_t reduction, int64_t ignore_index); +}; + +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ones.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ones.h new file mode 100644 index 0000000000000000000000000000000000000000..726a3965d3214ab71ce1d11c4dcd4c4d52d89f39 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/ones.h @@ -0,0 +1,131 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor ones(at::IntArrayRef size, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::ones_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor ones(at::IntArrayRef size, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::ones_names::call(size, names, dtype, layout, device, pin_memory); +} + +// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor ones(at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::ones::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor ones(at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::ones::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor ones(at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::ones::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor ones(at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::ones::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } +} + +// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor ones_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::ones::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor ones(c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::ones::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor ones_symint(c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::ones::call(size, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor ones(c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::ones::call(size, dtype, layout, device, pin_memory); + } +} + +// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ones_out(at::Tensor & out, at::IntArrayRef size) { + return at::_ops::ones_out::call(c10::fromIntArrayRefSlow(size), out); +} +namespace symint { + template ::value>> + at::Tensor & ones_out(at::Tensor & out, at::IntArrayRef size) { + return at::_ops::ones_out::call(c10::fromIntArrayRefSlow(size), out); + } +} + +// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ones_outf(at::IntArrayRef size, at::Tensor & out) { + return at::_ops::ones_out::call(c10::fromIntArrayRefSlow(size), out); +} +namespace symint { + template ::value>> + at::Tensor & ones_outf(at::IntArrayRef size, at::Tensor & out) { + return at::_ops::ones_out::call(c10::fromIntArrayRefSlow(size), out); + } +} + +// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ones_symint_out(at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::ones_out::call(size, out); +} +namespace symint { + template ::value>> + at::Tensor & ones_out(at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::ones_out::call(size, out); + } +} + +// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ones_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::ones_out::call(size, out); +} +namespace symint { + template ::value>> + at::Tensor & ones_outf(c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::ones_out::call(size, out); + } +} + +// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ones_out(at::Tensor & out, at::IntArrayRef size, ::std::optional names) { + return at::_ops::ones_names_out::call(size, names, out); +} +// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & ones_outf(at::IntArrayRef size, ::std::optional names, at::Tensor & out) { + return at::_ops::ones_names_out::call(size, names, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/pixel_unshuffle_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/pixel_unshuffle_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c95da5ccd373dcdc889eb96392388191a8319262 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/pixel_unshuffle_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & pixel_unshuffle_out(const at::Tensor & self, int64_t downscale_factor, at::Tensor & out); +TORCH_API at::Tensor pixel_unshuffle_cpu(const at::Tensor & self, int64_t downscale_factor); +TORCH_API at::Tensor math_pixel_unshuffle(const at::Tensor & self, int64_t downscale_factor); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rand.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rand.h new file mode 100644 index 0000000000000000000000000000000000000000..d71c33de4f2930a08541c655c41785b3753fd0c3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rand.h @@ -0,0 +1,377 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::rand_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory); + } +} + +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::rand_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::rand_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::rand_names::call(size, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::rand_names::call(size, names, dtype, layout, device, pin_memory); + } +} + +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, ::std::optional generator, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, ::std::optional generator, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, ::std::optional generator, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, ::std::optional generator, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::rand_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory); + } +} + +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::rand_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names, at::TensorOptions options={}) { + return at::_ops::rand_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::rand_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::rand_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory); + } +} + +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor rand(at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::rand::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } +} + +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::rand::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor rand(c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::rand::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::rand::call(size, dtype, layout, device, pin_memory); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor rand(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::rand::call(size, dtype, layout, device, pin_memory); + } +} + +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) { + return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) { + return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::rand_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); + } +} + +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) { + return at::_ops::rand_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor rand(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options={}) { + return at::_ops::rand_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::rand_generator::call(size, generator, dtype, layout, device, pin_memory); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor rand(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::rand_generator::call(size, generator, dtype, layout, device, pin_memory); + } +} + +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size) { + return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size) { + return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out); + } +} + +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor & out) { + return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor & out) { + return at::_ops::rand_out::call(c10::fromIntArrayRefSlow(size), out); + } +} + +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::rand_out::call(size, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::rand_out::call(size, out); + } +} + +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::rand_out::call(size, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & rand_outf(c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::rand_out::call(size, out); + } +} + +// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator) { + return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator) { + return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); + } +} + +// aten::rand.generator_out(SymInt[] size, *, Generator?
generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::rand_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); + } +} + +// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator) { + return at::_ops::rand_generator_out::call(size, generator, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator) { + return at::_ops::rand_generator_out::call(size, generator, out); + } +} + +// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::rand_generator_out::call(size, generator, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & rand_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::rand_generator_out::call(size, generator, out); + } +} + +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::DimnameList> names) { + return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::DimnameList> names) { + return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out); + } +} + +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) { + return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) { + return at::_ops::rand_names_out::call(c10::fromIntArrayRefSlow(size), names, out); + } +} + +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names) { + return at::_ops::rand_names_out::call(size, names, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names) { + return at::_ops::rand_names_out::call(size, names, out); + } +} + +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) { + return at::_ops::rand_names_out::call(size, names, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & rand_outf(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::Tensor & out) { + return at::_ops::rand_names_out::call(size, names, out); + } +} + +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) { + return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) { + return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); + } +} + +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) { + return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) { + return at::_ops::rand_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); + } +} + +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) { + return at::_ops::rand_generator_with_names_out::call(size, generator, names, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & rand_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names) { + return at::_ops::rand_generator_with_names_out::call(size, generator, names, out); + } +} + +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) { + return at::_ops::rand_generator_with_names_out::call(size, generator, names, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor & rand_outf(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::Tensor & out) { + return at::_ops::rand_generator_with_names_out::call(size, generator, names, out); + } +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rand_like_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rand_like_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..92ad0d09e7f19fdf64505c8560c475b5fd9dd469 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rand_like_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
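// Usage sketch (illustrative only, not part of the generated headers; assumes
// a standard libtorch build with <ATen/ATen.h> available). The rand wrappers
// above all funnel into at::_ops::rand_*::call, so the TensorOptions overloads
// and the unpacked dtype/layout/device/pin_memory overloads are equivalent:
//
//   at::Tensor a = at::rand({2, 3});                                   // default options
//   at::Tensor b = at::rand({2, 3}, at::TensorOptions().dtype(at::kHalf));
//   at::Tensor c = at::rand({2, 3}, /*dtype=*/at::kFloat, /*layout=*/::std::nullopt,
//                           /*device=*/::std::nullopt, /*pin_memory=*/::std::nullopt);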
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API rand_like { + using schema = at::Tensor (const at::Tensor &, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, ::std::optional<at::MemoryFormat>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand_like") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format); +}; + +struct TORCH_API rand_like_out { + using schema = at::Tensor & (const at::Tensor &, ::std::optional<at::MemoryFormat>, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rand_like") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/reshape_as_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/reshape_as_native.h new file mode 100644 index 0000000000000000000000000000000000000000..828f965f41dde7d2cc8af509e2377380d9221bec --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/reshape_as_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor reshape_as(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor reshape_as_nested(const at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/resize.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/resize.h new file mode 100644 index 0000000000000000000000000000000000000000..270142c423906a85709f498694d988c0bd38229a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/resize.h @@ -0,0 +1,105 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) { + return at::_ops::resize_::call(self, c10::fromIntArrayRefSlow(size), memory_format); + } +} + +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + const at::Tensor & resize_(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) { + return at::_ops::resize_::call(self, size, memory_format); + } +} + +// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) { + return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) { + return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out); + } +} + +// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline const at::Tensor & resize_outf(const at::Tensor & self, at::IntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, const at::Tensor & out) { + return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + const at::Tensor & resize_outf(const at::Tensor & self, at::IntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, const at::Tensor & out) { + return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out); + } +} + +// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline const at::Tensor & resize_symint_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) { + return at::_ops::resize_out::call(self, size, memory_format, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) { + return at::_ops::resize_out::call(self, size, memory_format, out); + } +} + +// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline const at::Tensor & resize_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, const at::Tensor & out) { + return at::_ops::resize_out::call(self, size, memory_format, out); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + const at::Tensor & resize_outf(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format, const at::Tensor & out) { + return at::_ops::resize_out::call(self, size, memory_format, out); + } +} + +// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor +inline at::Tensor resize(const at::Tensor & self, at::IntArrayRef size, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) { + return at::_ops::resize::call(self, c10::fromIntArrayRefSlow(size), memory_format); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + at::Tensor resize(const at::Tensor & self, at::IntArrayRef size, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) { + return at::_ops::resize::call(self, c10::fromIntArrayRefSlow(size), memory_format); + } +} + +// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat?
memory_format=None) -> Tensor +inline at::Tensor resize_symint(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) { + return at::_ops::resize::call(self, size, memory_format); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + at::Tensor resize(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt) { + return at::_ops::resize::call(self, size, memory_format); + } +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rnn_relu_cell_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rnn_relu_cell_native.h new file mode 100644 index 0000000000000000000000000000000000000000..17787168cc6d3cf18bbe279af851c37a63acfcc7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rnn_relu_cell_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional<at::Tensor> & b_ih={}, const ::std::optional<at::Tensor> & b_hh={}); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_native.h new file mode 100644 index 0000000000000000000000000000000000000000..88be9de0629ff3fbbb9a3bab3c20a542b83419cc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scatter_native.h @@ -0,0 +1,34 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_scatter_src_out : public at::meta::structured_scatter_src { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, const at::Tensor & out); +}; +struct TORCH_API structured_scatter_value_out : public at::meta::structured_scatter_value { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, const at::Tensor & out); +}; +struct TORCH_API structured_scatter_reduce_out : public at::meta::structured_scatter_reduce { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, const at::Tensor & out); +}; +struct TORCH_API structured_scatter_value_reduce_out : public at::meta::structured_scatter_value_reduce { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, const at::Tensor & out); +}; +TORCH_API at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src); +TORCH_API at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f1c26ae54791e95a87525ad90634677e4a04c533 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus_native.h @@ -0,0
+1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_softplus_out : public at::meta::structured_softplus { +void impl(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sort_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sort_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..aaf2763b9caccb028dc23b2a9809151bed5c0614 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sort_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> sort(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim=-1, bool descending=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> sort_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, ::std::optional<bool> stable, int64_t dim=-1, bool descending=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(const at::Tensor & self, ::std::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_bessel_j0_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_bessel_j0_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9701de88f63ddf403f675a559fbf8867b802bae1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_bessel_j0_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
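// Usage sketch (illustrative only, not part of the generated headers). The
// _out/_outf pairs declared in sort_cpu_dispatch.h above are one operator with
// the out arguments leading or trailing; both forms write into preallocated
// outputs, resizing them as needed:
//
//   at::Tensor self = at::rand({8});                 // assumed input tensor
//   at::Tensor values = at::empty({0});
//   at::Tensor indices = at::empty({0}, at::kLong);
//   at::cpu::sort_out(values, indices, self, /*stable=*/true, /*dim=*/-1, /*descending=*/false);
//   at::cpu::sort_outf(self, /*stable=*/true, /*dim=*/-1, /*descending=*/false, values, indices);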
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_bessel_j0(const at::Tensor & self); +TORCH_API at::Tensor & special_bessel_j0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_bessel_j0_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_bessel_j0_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_bessel_j0_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a58b8709f6a0360a242d4945bdfaa7ff4c73cea3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_bessel_j0_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_bessel_j0 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_t_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_t_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7e8ebbc1e66ecf2b94b3af21df2f9021c83311fd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_t_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
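// Sketch of the structured-kernel split (illustrative only): the meta()
// declared on structured_special_bessel_j0 above only validates inputs and
// configures the TensorIterator (shapes, dtypes, output allocation); the
// matching structured_*_out::impl() declared in the corresponding *_native.h
// header performs the actual computation. Callers simply use the public op:
//
//   at::Tensor x = at::rand({4});
//   at::Tensor y = at::special_bessel_j0(x);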
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor special_shifted_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/split_with_sizes.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/split_with_sizes.h new file mode 100644 index 0000000000000000000000000000000000000000..ffa1ca43b63b230741a5e58f76cd6500d07835e4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/split_with_sizes.h @@ -0,0 +1,47 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] +inline ::std::vector<at::Tensor> split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes::call(self, c10::fromIntArrayRefSlow(split_sizes), dim); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>> + ::std::vector<at::Tensor> split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes::call(self, c10::fromIntArrayRefSlow(split_sizes), dim); + } +} + +// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] +inline ::std::vector<at::Tensor> split_with_sizes_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes::call(self, split_sizes, dim); +} +namespace symint { + template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>> + ::std::vector<at::Tensor> split_with_sizes(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes::call(self, split_sizes, dim); + } +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sym_storage_offset_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sym_storage_offset_native.h new file mode 100644 index 0000000000000000000000000000000000000000..09821bb9b5604dda09d7e2d46b336f3b5f722a2a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sym_storage_offset_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API c10::SymInt sym_storage_offset(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/threshold_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/threshold_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..72e37ccd2b5ca05e96d857585f13b3789e4be55f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/threshold_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include
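// Usage sketch (illustrative only): split_with_sizes, declared above, returns
// views of `self` whose lengths along `dim` must sum to that dimension's size:
//
//   at::Tensor t = at::rand({6, 4});
//   ::std::vector<at::Tensor> parts = at::split_with_sizes(t, {2, 4}, /*dim=*/0);
//   // parts[0] is a 2x4 view of t, parts[1] a 4x4 view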
+ +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); +TORCH_API at::Tensor & threshold_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); +TORCH_API at::Tensor & threshold_outf(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/triu_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/triu_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dac4e757fbccaf75d7094e5e88e8faefffa0b689 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/triu_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor triu(const at::Tensor & self, int64_t diagonal=0); +TORCH_API at::Tensor & triu_(at::Tensor & self, int64_t diagonal=0); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_bilinear2d_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_bilinear2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..69461603fb7e765f8a0edabaf363bf5149da6f65 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_bilinear2d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
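// Usage sketch (illustrative only): within a dispatch-key namespace such as
// at::cpu above, threshold allocates a fresh result while threshold_ mutates
// its input; elements not greater than the threshold are replaced by value:
//
//   at::Tensor t = at::rand({4});
//   at::Tensor r = at::cpu::threshold(t, /*threshold=*/0.5, /*value=*/0.0);
//   at::cpu::threshold_(t, 0.5, 0.0);  // in-place counterpart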
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor upsample_bilinear2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor & upsample_bilinear2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor & upsample_bilinear2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out); +TORCH_API at::Tensor & upsample_bilinear2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor & upsample_bilinear2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out); + +} // namespace cuda
} // namespace at
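// Usage sketch (illustrative only; assumes a CUDA-enabled libtorch build): the
// _symint variants above accept c10::SymIntArrayRef so tracing/compile stacks
// can pass symbolic sizes, while eager callers typically use the IntArrayRef
// form:
//
//   at::Tensor img = at::rand({1, 3, 8, 8}, at::kCUDA);
//   at::Tensor up = at::upsample_bilinear2d(img, {16, 16}, /*align_corners=*/false);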