Auto-generated ATen operator headers (torchgen output) added under `.venv/lib/python3.11/site-packages/torch/include/ATen/ops/` in this portion of the diff, listed with their added line counts:

 _aminmax_compositeexplicitautograd_dispatch.h | 26
 _efficient_attention_forward_cuda_dispatch.h | 24
 _foreach_ceil.h | 44
 _foreach_norm_cuda_dispatch.h | 23
 _fused_adamw_cuda_dispatch.h | 24
 _fused_adamw_ops.h | 83
 _int_mm.h | 39
 _linalg_check_errors_ops.h | 28
 _linalg_det_meta.h | 27
 _nested_get_lengths_ops.h | 28
 _nested_view_from_buffer.h | 30
 _nnpack_spatial_convolution_ops.h | 39
 _pin_memory_compositeexplicitautograd_dispatch.h | 25
 _prelu_kernel_backward_native.h | 22
 _remove_batch_dim_native.h | 21
 _scaled_dot_product_cudnn_attention_backward_cuda_dispatch.h | 24
 _softmax_backward_data_meta.h | 27
 _softmax_ops.h | 39
 _test_serialization_subcmul_ops.h | 28
 _upsample_nearest_exact1d_ops.h | 50
 _upsample_nearest_exact2d.h | 113
 _validate_sparse_csr_tensor_args_native.h | 21
 adaptive_avg_pool1d_compositeimplicitautograd_dispatch.h | 23
 adaptive_max_pool3d_backward_native.h | 26
 amax_compositeexplicitautogradnonfunctional_dispatch.h | 23
 any_cpu_dispatch.h | 31
 arccosh_native.h | 23
 arcsinh_native.h | 23
 asinh_compositeexplicitautogradnonfunctional_dispatch.h | 24
 atleast_2d_compositeimplicitautograd_dispatch.h | 24
 convolution_backward_overrideable_ops.h | 39
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))") + static ::std::tuple call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_batch_norm_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_batch_norm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a282b5c142cb9774ae037f805645912e0ec3fc17 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_batch_norm_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple cudnn_batch_norm(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double exponential_average_factor, double epsilon); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ea395fda1178ed8442b2519b768bb1f9336f60e0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cudnn_convolution_transpose_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & cudnn_convolution_transpose_out_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out); +TORCH_API at::Tensor cudnn_convolution_transpose(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dstack_native.h 
b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dstack_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c35a19f8254889aaf731df1fe846a53a557e3335 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dstack_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor dstack(at::TensorList tensors); +TORCH_API at::Tensor & dstack_out(at::TensorList tensors, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/exp2_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/exp2_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..53170201b578fabc1676e50c4ff09fbf4ff4dab5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/exp2_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor exp2(const at::Tensor & self); +TORCH_API at::Tensor & exp2_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & exp2_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & exp2_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_hfftn_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_hfftn_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6d2ef79ee0ed3bff77f507eba562d57188282696 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_hfftn_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fft_hfftn { + using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, at::OptionalIntArrayRef, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_hfftn") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm); +}; + +struct TORCH_API fft_hfftn_out { + using schema = const at::Tensor & (const at::Tensor &, at::OptionalSymIntArrayRef, at::OptionalIntArrayRef, ::std::optional, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_hfftn") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)") + static const at::Tensor & call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm, const at::Tensor & out); + static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm, const at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/frobenius_norm_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/frobenius_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a4afd84d9678136255beda99578984648aecf7c2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/frobenius_norm_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & frobenius_norm_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/histc.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/histc.h new file mode 100644 index 0000000000000000000000000000000000000000..e4744841bba93e429f84064d60f7c979aa718631 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/histc.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & histc_out(at::Tensor & out, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) { + return at::_ops::histc_out::call(self, bins, min, max, out); +} +// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & histc_outf(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) { + return at::_ops::histc_out::call(self, bins, min, max, out); +} + +// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor +inline at::Tensor histc(const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) { + return at::_ops::histc::call(self, bins, min, max); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f894d1b0cc209549f95060ce8d2a94b08ceabb54 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/im2col_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7404a372626b2999e9a273ed241a1cea2521a2d9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/int_repr_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
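A minimal sketch of the histc entry points declared above, assuming a floating-point input; with min == max == 0 the range is taken from the data, so explicit bounds are passed here for determinism. The input tensor and bin count are illustrative assumptions.

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({1000});
  // Functional form: 20 bins spanning [-3, 3].
  at::Tensor h = at::histc(x, /*bins=*/20, /*min=*/-3, /*max=*/3);
  // Out variant: same computation written into a preallocated tensor.
  at::Tensor out = at::empty({20});
  at::histc_out(out, x, 20, -3, 3);
  return 0;
}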
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & int_repr_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & int_repr_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/is_complex.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/is_complex.h new file mode 100644 index 0000000000000000000000000000000000000000..75f32c6b4d825023d92e56fd38ae04a77c2d5376 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/is_complex.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_complex(Tensor self) -> bool +inline bool __dispatch_is_complex(const at::Tensor & self) { + return at::_ops::is_complex::call(self); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isin_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isin_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6c8c90743107a29e6fb9900173870846ee5ab9a5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isin_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API isin_Tensor_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +}; + +struct TORCH_API isin_Tensor_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor") + static at::Tensor call(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert); +}; + +struct TORCH_API isin_Tensor_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out); +}; + +struct TORCH_API isin_Tensor_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor") + static at::Tensor call(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert); +}; + +struct TORCH_API isin_Scalar_Tensor_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out); +}; + +struct TORCH_API isin_Scalar_Tensor { + using schema = at::Tensor (const at::Scalar &, const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isin") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor") + static at::Tensor call(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/kaiser_window.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/kaiser_window.h new file mode 100644 index 0000000000000000000000000000000000000000..7320d3e90145e1961c057db606771dc5f062acbd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/kaiser_window.h @@ -0,0 +1,79 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options={}) { + return at::_ops::kaiser_window::call(window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::kaiser_window::call(window_length, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::kaiser_window_periodic::call(window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::kaiser_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options={}) { + return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length) { + return at::_ops::kaiser_window_out::call(window_length, out); +} +// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor & out) { + return at::_ops::kaiser_window_out::call(window_length, out); +} + +// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out); +} +// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out); +} + +// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic, double beta) { + return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out); +} +// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, double beta, at::Tensor & out) { + return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_inv_ex.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_inv_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..62b9480e83900573b9dc5f6dd954a5dcb10ea4a4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_inv_ex.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info) +inline ::std::tuple linalg_inv_ex(const at::Tensor & A, bool check_errors=false) { + return at::_ops::linalg_inv_ex::call(A, check_errors); +} + +// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info) +inline ::std::tuple linalg_inv_ex_out(at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false) { + return at::_ops::linalg_inv_ex_inverse::call(A, check_errors, inverse, info); +} +// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info) +inline ::std::tuple linalg_inv_ex_outf(const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) { + return at::_ops::linalg_inv_ex_inverse::call(A, check_errors, inverse, info); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_slogdet_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_slogdet_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0e1c769508189d1cfef4e106c24d3f881625ff05 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_slogdet_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple linalg_slogdet(const at::Tensor & A); +TORCH_API ::std::tuple linalg_slogdet_out(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lu_solve_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lu_solve_native.h new file mode 100644 index 0000000000000000000000000000000000000000..949a0a64f1893d2e65cff291434650ee5f6b7f78 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lu_solve_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots); +TORCH_API at::Tensor & lu_solve_out(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out); +} // namespace native +} // namespace at diff --git 
a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mH_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mH_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..867cdb3fa0ee326410e7c93e1b27b0fcaab3e100 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mH_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor mH(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/median_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/median_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..062ae20866c71d27358f8fc6acc46e7a1098d616 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/median_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & median_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & median_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API ::std::tuple median(const at::Tensor & self, int64_t dim, bool keepdim=false); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mm_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..401d5c33c74252a465988ded89e34c4825512321 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mm_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
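A minimal sketch of the linalg_inv_ex functional form declared a little further up, assuming a well-conditioned square input; with check_errors=false the info tensor must be inspected by the caller. The matrix construction is an illustrative assumption.

#include <ATen/ATen.h>

int main() {
  at::Tensor A = at::eye(3) + at::randn({3, 3}).mul(0.1);
  // Returns (inverse, info); a nonzero info entry flags a failed inversion.
  auto [inverse, info] = at::linalg_inv_ex(A, /*check_errors=*/false);
  bool ok = info.eq(0).all().item<bool>();
  (void)ok;
  return 0;
}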
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2); +TORCH_API at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2); +TORCH_API at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mode_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mode_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e550c1c3c3572fd392da925942aa5f999a763975 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mode_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple mode(const at::Tensor & self, int64_t dim=-1, bool keepdim=false); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mvlgamma_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mvlgamma_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8627338748b7864cdc7b2acc342bdaa1dde927a2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mvlgamma_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor mvlgamma(const at::Tensor & self, int64_t p); +TORCH_API at::Tensor & mvlgamma_(at::Tensor & self, int64_t p); +TORCH_API at::Tensor & mvlgamma_out(const at::Tensor & self, int64_t p, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nansum_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nansum_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7d21d327983f5c00fc7df333a48db0e2e20345c1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nansum_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor nansum(const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & nansum_out(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_group_norm_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_group_norm_ops.h new file mode 100644 index 
0000000000000000000000000000000000000000..2522879d438748f8bfdd559a1f76c55bac523928 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_group_norm_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API native_group_norm { + using schema = ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, c10::SymInt, c10::SymInt, c10::SymInt, int64_t, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_group_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps); +}; + +struct TORCH_API native_group_norm_out { + using schema = ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, c10::SymInt, c10::SymInt, c10::SymInt, int64_t, double, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_group_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))") + static ::std::tuple call(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2a144c8454ef80f54b0d9d1fb1e315c461ca76b8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & permute_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims); +TORCH_API at::Tensor & permute_copy_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/pin_memory_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/pin_memory_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d0accd7b3e06f47575440d25671b6ef9b1f7f4a4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/pin_memory_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API pin_memory { + using schema = at::Tensor (const at::Tensor &, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pin_memory") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pin_memory(Tensor(a) self, Device? 
device=None) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, ::std::optional device); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional device); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/prod_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/prod_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..055fb4a4658d312d388ac8934ac141ff92172570 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/prod_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & prod_outf(const at::Tensor & self, ::std::optional dtype, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/q_per_channel_zero_points.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/q_per_channel_zero_points.h new file mode 100644 index 0000000000000000000000000000000000000000..64889d65f5ca4c057632d9efa38a55862a3b6489 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/q_per_channel_zero_points.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::q_per_channel_zero_points(Tensor self) -> Tensor +inline at::Tensor q_per_channel_zero_points(const at::Tensor & self) { + return at::_ops::q_per_channel_zero_points::call(self); +} + +// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & q_per_channel_zero_points_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::q_per_channel_zero_points_out::call(self, out); +} +// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
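These generated headers pair each _out entry point (destination tensor first, trailing defaults preserved) with an _outf entry point (schema argument order, destination last). A minimal sketch of that convention using the prod out-variant declared above, assuming a libtorch build that exposes it through the public at:: namespace; the input values are illustrative.

#include <ATen/ATen.h>
#include <optional>

int main() {
  at::Tensor x = at::arange(1, 5, at::kFloat);      // [1, 2, 3, 4]
  at::Tensor out = at::empty({}, x.options());
  // _out form: destination first, dtype keeps its default.
  at::prod_out(out, x);                              // out becomes 24
  // _outf form: schema order, destination last.
  at::prod_outf(x, /*dtype=*/::std::nullopt, out);
  return 0;
}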
+inline at::Tensor & q_per_channel_zero_points_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::q_per_channel_zero_points_out::call(self, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rand_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rand_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..166ec3aa7195408443f5e9ccf2b6df6c85bc0191 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rand_compositeexplicitautograd_dispatch.h @@ -0,0 +1,50 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional names, at::TensorOptions options={}); +TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional names, at::TensorOptions options={}); +TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional names); +TORCH_API at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional names, at::Tensor & out); +TORCH_API at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional names); +TORCH_API at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, ::std::optional names, at::Tensor & out); +TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional generator, ::std::optional names, at::TensorOptions options={}); +TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional generator, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names, at::TensorOptions options={}); +TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size, ::std::optional generator, ::std::optional names); +TORCH_API at::Tensor & rand_outf(at::IntArrayRef size, ::std::optional generator, ::std::optional names, at::Tensor & out); +TORCH_API at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names); +TORCH_API at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional names, at::Tensor & out); +TORCH_API at::Tensor rand(at::IntArrayRef size, at::TensorOptions options={}); +TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional dtype, 
::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options={}); +TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & rand_out(at::Tensor & out, at::IntArrayRef size); +TORCH_API at::Tensor & rand_outf(at::IntArrayRef size, at::Tensor & out); +TORCH_API at::Tensor & rand_symint_out(at::Tensor & out, c10::SymIntArrayRef size); +TORCH_API at::Tensor & rand_symint_outf(c10::SymIntArrayRef size, at::Tensor & out); +TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional generator, at::TensorOptions options={}); +TORCH_API at::Tensor rand(at::IntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional generator, at::TensorOptions options={}); +TORCH_API at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/remainder_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/remainder_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4ce9c77f6a0b33925bbb15e391a915e31891849e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/remainder_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor remainder(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & remainder_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & remainder_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & remainder_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/replication_pad3d.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/replication_pad3d.h new file mode 100644 index 0000000000000000000000000000000000000000..983e08b516f74499a51acf4cd117d8e3a95c6657 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/replication_pad3d.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) 
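A minimal sketch of the two call styles generated for factory functions such as rand above: a packed at::TensorOptions overload and an overload with the dtype/layout/device/pin_memory optionals spelled out. The shape and option values are illustrative assumptions.

#include <ATen/ATen.h>
#include <optional>

int main() {
  // Packed TensorOptions form.
  at::Tensor a = at::rand({2, 3}, at::TensorOptions().dtype(at::kFloat).device(at::kCPU));
  // Equivalent call with the unpacked optional arguments.
  at::Tensor b = at::rand({2, 3}, at::kFloat, ::std::nullopt, at::Device(at::kCPU), ::std::nullopt);
  return 0;
}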
+inline at::Tensor & replication_pad3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::replication_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & replication_pad3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::replication_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
+  }
+}
+
+// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & replication_pad3d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
+    return at::_ops::replication_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & replication_pad3d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
+    return at::_ops::replication_pad3d_out::call(self, c10::fromIntArrayRefSlow(padding), out);
+  }
+}
+
+// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & replication_pad3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::replication_pad3d_out::call(self, padding, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & replication_pad3d_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::replication_pad3d_out::call(self, padding, out);
+  }
+}
+
+// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & replication_pad3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
+    return at::_ops::replication_pad3d_out::call(self, padding, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & replication_pad3d_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out) {
+    return at::_ops::replication_pad3d_out::call(self, padding, out);
+  }
+}
+
+// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
+inline at::Tensor replication_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::replication_pad3d::call(self, c10::fromIntArrayRefSlow(padding));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor replication_pad3d(const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::replication_pad3d::call(self, c10::fromIntArrayRefSlow(padding));
+  }
+}
+
+// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
+inline at::Tensor replication_pad3d_symint(const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::replication_pad3d::call(self, padding);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor replication_pad3d(const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::replication_pad3d::call(self, padding);
+  }
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c8ef8f162daf1c289c3a41d1f831c8b7a543bd1
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor resolve_conj(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rsub_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rsub_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd81002989c7b2e1f7f806d3713ae4af476e8d55
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rsub_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & rsub_Tensor_out(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor rsub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor rsub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & rsub_Scalar_out(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/searchsorted_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/searchsorted_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..84eba1a61b94610f2645f7f44256f04326d05762
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/searchsorted_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor searchsorted_cpu(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, ::std::optional<c10::string_view> side=::std::nullopt, const ::std::optional<at::Tensor> & sorter={});
+TORCH_API at::Tensor & searchsorted_out_cpu(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out);
+TORCH_API at::Tensor searchsorted_cuda(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, ::std::optional<c10::string_view> side=::std::nullopt, const ::std::optional<at::Tensor> & sorter={});
+TORCH_API at::Tensor & searchsorted_out_cuda(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out);
+TORCH_API at::Tensor searchsorted_cpu(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, ::std::optional<c10::string_view> side=::std::nullopt, const ::std::optional<at::Tensor> & sorter={});
+TORCH_API at::Tensor & searchsorted_out_cpu(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out);
+TORCH_API at::Tensor searchsorted_cuda(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, ::std::optional<c10::string_view> side=::std::nullopt, const ::std::optional<at::Tensor> & sorter={});
+TORCH_API at::Tensor & searchsorted_out_cuda(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, ::std::optional<c10::string_view> side, const ::std::optional<at::Tensor> & sorter, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sgn_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sgn_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d6fec4aecfcb45e2bc1f3d685b78710a1b887fe6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sgn_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API sgn {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sgn")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sgn(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API sgn_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sgn_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sgn_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API sgn_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sgn")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c2ef5788cd0e0fca89f6ce2dc8429084382b0af3
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API sigmoid_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sigmoid_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input);
+};
+
+struct TORCH_API sigmoid_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sigmoid_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & output);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_gammainc.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_gammainc.h
new file mode 100644
index 0000000000000000000000000000000000000000..a31e017ac30c1c6065949febb28cb5eb3918605c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_gammainc.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/special_gammainc_ops.h>
+
+namespace at {
+
+
+// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_gammainc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::special_gammainc_out::call(self, other, out);
+}
+// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_gammainc_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::special_gammainc_out::call(self, other, out);
+}
+
+// aten::special_gammainc(Tensor self, Tensor other) -> Tensor
+inline at::Tensor special_gammainc(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::special_gammainc::call(self, other);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0dbd2b384fcf5cac1193e2e87e1d7df0944eed44
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor special_legendre_polynomial_p(const at::Scalar & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_legendre_polynomial_p_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+TORCH_API at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Scalar & n);
+TORCH_API at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n);
+TORCH_API at::Tensor & special_legendre_polynomial_p_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5982698179d238a181d8283720fce7f1a78fad80
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_ops.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_legendre_polynomial_p {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_legendre_polynomial_p")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_legendre_polynomial_p_x_scalar {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_legendre_polynomial_p")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor")
+  static at::Tensor call(const at::Scalar & x, const at::Tensor & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n);
+};
+
+struct TORCH_API special_legendre_polynomial_p_n_scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_legendre_polynomial_p")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor")
+  static at::Tensor call(const at::Tensor & x, const at::Scalar & n);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n);
+};
+
+struct TORCH_API special_legendre_polynomial_p_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_legendre_polynomial_p")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+};
+
+struct TORCH_API special_legendre_polynomial_p_x_scalar_out {
+  using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_legendre_polynomial_p")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+};
+
+struct TORCH_API special_legendre_polynomial_p_n_scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_legendre_polynomial_p")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/squeeze_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/squeeze_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..00ca6dfc42be8d1c47723a96f188a37f2a0d3d41
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/squeeze_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor squeeze(const at::Tensor & self, at::Dimname dim);
+TORCH_API at::Tensor & squeeze_(at::Tensor & self, at::Dimname dim);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tril_indices_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tril_indices_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e000b65aa077989b915be4c733b3c34b82f3691e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/tril_indices_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong);
+TORCH_API at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trunc_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trunc_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8dbd174fcf8c1ff25f05dc26291129278b8892ef
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/trunc_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API trunc {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trunc")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trunc(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API trunc_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trunc_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trunc_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API trunc_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trunc")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unsqueeze_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unsqueeze_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0fcea185ba7b6577fa61e1a23b084f5a0ebe6dc6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/unsqueeze_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor unsqueeze(const at::Tensor & self, int64_t dim);
+TORCH_API at::Tensor & unsqueeze_(at::Tensor & self, int64_t dim);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed9cfed64700dc71694add38d88d1f46927c8711
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API upsample_nearest3d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, ::std::optional<double>, ::std::optional<double>, ::std::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & grad_input);
+};
+
+struct TORCH_API upsample_nearest3d_backward {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, ::std::optional<double>, ::std::optional<double>, ::std::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_d, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_complex_copy_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_complex_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5533c1b50c95dd52f8d2b696acdd2119a53f5a5b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_as_complex_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & view_as_complex_copy_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor view_as_complex_copy(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xor_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xor_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6aab74f0bb16a4ff25ff5e8b09ea910c660d47d9
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xor_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor __xor__(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & __ixor__(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor __xor__(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & __ixor__(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeimplicitautograd
+} // namespace at
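
The headers in this diff only declare the generated ATen C++ entry points; they are consumed through the regular at:: functions. A minimal usage sketch follows, assuming a standard libtorch build with <ATen/ATen.h> on the include path; the file name, tensor shapes, and choice of operators are illustrative only and are not part of the diff.

// usage_sketch.cpp -- illustrative only; assumes a standard libtorch build.
#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::rand({2, 3});            // at::rand; the declarations above are its per-dispatch-key variants
  at::Tensor s = at::sgn(t);                  // aten::sgn, whose schema is declared in sgn_ops.h
  at::Tensor out = at::empty_like(t);
  at::trunc_out(out, t);                      // aten::trunc.out, the out= variant declared in trunc_ops.h
  at::Tensor g = at::special_gammainc(t, at::rand({2, 3}));  // aten::special_gammainc
  return 0;
}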