| | #pragma once |
| |
|
| | |
| |
|
| | #include <tuple> |
| | #include <vector> |
| |
|
| | |
| | |
| | |
| | #include <ATen/core/ATen_fwd.h> |
| |
|
| | namespace at { |
| | namespace _ops { |
| |
|
| |
|
// Dispatcher stub for the out-variant overload "aten::adaptive_avg_pool2d.out".
// Auto-generated operator struct: bundles the op's name, overload name, and
// full schema string together with the typed entry points into the dispatcher.
struct TORCH_API adaptive_avg_pool2d_out {
  // Typed signature of this overload: writes into `out` and returns it by reference.
  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &);
  using ptr_schema = schema*;
  // Schema metadata declared via macro (string storage differs on Windows/CUDA builds,
  // hence the STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA indirection).
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool2d")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)")
  // Dispatch through the full dispatcher (computes the dispatch key set from args).
  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
  // Re-dispatch with an explicitly supplied key set (used below the initial dispatch).
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
};
| |
|
// Dispatcher stub for the functional overload "aten::adaptive_avg_pool2d".
// Auto-generated operator struct: bundles the op's name, (empty) overload name,
// and full schema string together with the typed entry points into the dispatcher.
struct TORCH_API adaptive_avg_pool2d {
  // Typed signature of this overload: returns a freshly allocated result tensor.
  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef);
  using ptr_schema = schema*;
  // Schema metadata declared via macro (string storage differs on Windows/CUDA builds,
  // hence the STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA indirection).
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool2d")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor")
  // Dispatch through the full dispatcher (computes the dispatch key set from args).
  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size);
  // Re-dispatch with an explicitly supplied key set (used below the initial dispatch).
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size);
};
| |
|
| | }} |
| |
|