diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1164daf58526c797f3b16f9646130512c1107659
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor & _adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor & _adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size);
+TORCH_API at::Tensor & _adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1484d3c8994ae797f5d4f08837aea71c6acb3e4a
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cast_Short {
+  using schema = at::Tensor (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cast_Short")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cast_Short(Tensor self, bool non_blocking=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, bool non_blocking);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_euclidean_dist_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_euclidean_dist_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3bdf109b8724dbe6ff7c4fee18486ae082144f0f
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_euclidean_dist_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _euclidean_dist(const at::Tensor & x1, const at::Tensor & x2);
+TORCH_API at::Tensor & _euclidean_dist_out(const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a95bb778450899e83badd0f4522ea752207ebee
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_cos_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_cos(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_cos(at::TensorList self) {
+    return at::_ops::_foreach_cos::call(self);
+}
+
+// aten::_foreach_cos_(Tensor(a!)[] self) -> ()
+inline void _foreach_cos_(at::TensorList self) {
+    return at::_ops::_foreach_cos_::call(self);
+}
+
+// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_cos_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_cos_out::call(self, out);
+}
+// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_cos_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_cos_out::call(self, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ecb7204569b0c81dc62536c8267530f8f6b3e2b8
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_exp_slow(at::TensorList self);
+TORCH_API void _foreach_exp_out(at::TensorList self, at::TensorList out);
+TORCH_API void foreach_tensor_exp_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_exp_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_exp_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg.h
new file mode 100644
index 0000000000000000000000000000000000000000..34d29a89929d0ca0721302376ba0ee5811d288c8
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_neg.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_neg_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_neg(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_neg(at::TensorList self) {
+    return at::_ops::_foreach_neg::call(self);
+}
+
+// aten::_foreach_neg_(Tensor(a!)[] self) -> ()
+inline void _foreach_neg_(at::TensorList self) {
+    return at::_ops::_foreach_neg_::call(self);
+}
+
+// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_neg_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_neg_out::call(self, out);
+}
+// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_neg_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_neg_out::call(self, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sin_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sin_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..67e7c425bc0736bc51a96b0e61af703d63e2b4aa
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sin_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_sin(at::TensorList self);
+TORCH_API void _foreach_sin_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a6034ab6c78984b82917ff897f1a9d60652d626
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _grid_sampler_2d_cpu_fallback(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+TORCH_API at::Tensor & _grid_sampler_2d_cpu_fallback_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..9474b3f7cf4a64ca848f5487a3c6b03b20c7f1e2
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/_linalg_eigh_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured__linalg_eigh_out : public at::meta::structured__linalg_eigh {
+void impl(const at::Tensor & A, c10::string_view UPLO, bool compute_v, const at::Tensor & eigenvalues, const at::Tensor & eigenvectors);
+};
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c41cfe31031e1f3f38dc31fe7ab8dcc2bdb4ecca
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd(const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, ::std::optional<c10::string_view> driver=::std::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, ::std::optional<c10::string_view> driver=::std::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_outf(const at::Tensor & A, bool full_matrices, bool compute_uv, ::std::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6ab55e40f8cc9303ec467b800d0ee56906ec5b24
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_from_padded_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nested_from_padded {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_from_padded")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213);
+};
+
+struct TORCH_API _nested_from_padded_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_from_padded")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax.h
new file mode 100644
index 0000000000000000000000000000000000000000..0dc824c5a41c9616bd462114f180b6ad99abd32f
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_softmax_ops.h>
+
+namespace at {
+
+
+// aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+inline at::Tensor _softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
+    return at::_ops::_softmax::call(self, dim, half_to_float);
+}
+
+// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
+    return at::_ops::_softmax_out::call(self, dim, half_to_float, out);
+}
+// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
+    return at::_ops::_softmax_out::call(self, dim, half_to_float, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_spsolve_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_spsolve_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6352124103f826afc4c0e61effdf1b5b4aeb655b
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_spsolve_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _sparse_csr_linear_solve(const at::Tensor & A, const at::Tensor & B, bool left=true);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fed21b5588d61d673365905d17bd77fdfa2d5f96
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _standard_gamma_grad_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & output);
+TORCH_API at::Tensor & _standard_gamma_grad_outf(const at::Tensor & self, const at::Tensor & output, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell.h
new file mode 100644
index 0000000000000000000000000000000000000000..c0d6a3d99795ee43fde8b9468aea8f7049b09518
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_thnn_fused_lstm_cell_ops.h>
+
+namespace at {
+
+
+// aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias={}, const ::std::optional<at::Tensor> & hidden_bias={}) {
+    return at::_ops::_thnn_fused_lstm_cell::call(input_gates, hidden_gates, cx, input_bias, hidden_bias);
+}
+
+// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias={}, const ::std::optional<at::Tensor> & hidden_bias={}) {
+    return at::_ops::_thnn_fused_lstm_cell_out::call(input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
+}
+// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_outf(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const ::std::optional<at::Tensor> & input_bias, const ::std::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::_thnn_fused_lstm_cell_out::call(input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0782200825e01fd5ded2d8565f6b2400dc772756
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _thnn_fused_lstm_cell_backward_impl {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_thnn_fused_lstm_cell_backward_impl")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias);
+};
+
+struct TORCH_API _thnn_fused_lstm_cell_backward_impl_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_thnn_fused_lstm_cell_backward_impl")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional<at::Tensor> & grad_hy, const ::std::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..19212772521073086cf240eb07fce39f404190bb
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured__upsample_bilinear2d_aa_backward : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & grad_output, at::ArrayRef<c10::SymInt> output_size, at::ArrayRef<c10::SymInt> input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
+};
+
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f59253f640e54b1f0187cf11acc46f070bff1a9c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales=::std::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales=::std::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales=::std::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales=::std::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_values_copy_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_values_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5455e809be09c7a1e99717a79ed94a06e2a33f45
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_values_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _values_copy_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & _values_copy_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int8pack_mm_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int8pack_mm_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7ac33b545328d8f208236ff3a749a347d32a2fa7
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_int8pack_mm_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _weight_int8pack_mm(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scales);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/add_compositeexplicitautogradnonfunctional_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/add_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..76bdc28274e26c2993bfe226dcfbae9429e554c3
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/add_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atan_meta_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atan_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6332bfdc50e0112e2184f7f02da88f1b15b2f284
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atan_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor atan(const at::Tensor & self);
+TORCH_API at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & atan_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_2d_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_2d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3dbd8a0612bea8fd38fedbf360d5ee31d1d80c69
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_2d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor atleast_2d(const at::Tensor & self);
+TORCH_API ::std::vector<at::Tensor> atleast_2d(at::TensorList tensors);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_3d_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_3d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..056c99af9be3f21f319764e7bffe74ceda81577c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_3d_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor atleast_3d(const at::Tensor & self);
+TORCH_API ::std::vector<at::Tensor> atleast_3d(at::TensorList tensors);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5eeb0e432de70ad6b991f01e1e8e33089aa7b7ad
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_ops.h
@@ -0,0 +1,105 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API bernoulli {
+  using schema = at::Tensor (const at::Tensor &, ::std::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bernoulli")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bernoulli(Tensor self, *, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, ::std::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator);
+};
+
+struct TORCH_API bernoulli_out {
+  using schema = at::Tensor & (const at::Tensor &, ::std::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bernoulli")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out);
+};
+
+struct TORCH_API bernoulli__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &, ::std::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bernoulli_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator);
+};
+
+struct TORCH_API bernoulli__float {
+  using schema = at::Tensor & (at::Tensor &, double, ::std::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bernoulli_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "float")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, double p, ::std::optional<at::Generator> generator);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, ::std::optional<at::Generator> generator);
+};
+
+struct TORCH_API bernoulli_p {
+  using schema = at::Tensor (const at::Tensor &, double, ::std::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bernoulli")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "p")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator);
+};
+
+struct TORCH_API bernoulli_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bernoulli")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator, at::Tensor & out);
+};
+
+struct TORCH_API bernoulli_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bernoulli")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, ::std::optional<at::Generator> generator);
+};
+
+struct TORCH_API bernoulli_float_out {
+  using schema = at::Tensor & (const at::Tensor &, double, ::std::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bernoulli")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "float_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional<at::Generator> generator, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..740bf607e517188e4aa6e7c28d32fd33b10a5dd5
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/bincount_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & bincount_out(const at::Tensor & self, const ::std::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out);
+TORCH_API at::Tensor _bincount_cpu(const at::Tensor & self, const ::std::optional<at::Tensor> & weights={}, int64_t minlength=0);
+TORCH_API at::Tensor _bincount_cuda(const at::Tensor & self, const ::std::optional<at::Tensor> & weights={}, int64_t minlength=0);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/can_cast_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/can_cast_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1eb114eb3c0f4a2c408e6c13dd480e2fc7a4e66e
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/can_cast_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API bool can_cast(at::ScalarType from_, at::ScalarType to);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices.h
new file mode 100644
index 0000000000000000000000000000000000000000..05e63da3301520cae64511c692fae0baeeed8288
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/ccol_indices_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_copy.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..0c65607cae86a3f03b9bc500d2c30d4341110e2c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_copy.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/ccol_indices_copy_ops.h>
+
+namespace at {
+
+
+// aten::ccol_indices_copy(Tensor self) -> Tensor
+inline at::Tensor ccol_indices_copy(const at::Tensor & self) {
+    return at::_ops::ccol_indices_copy::call(self);
+}
+
+// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & ccol_indices_copy_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::ccol_indices_copy_out::call(self, out);
+}
+// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & ccol_indices_copy_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::ccol_indices_copy_out::call(self, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_overrideable.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_overrideable.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd42559783bdab4f12ef77889f8e849f2bdfbce1
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_overrideable.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/convolution_backward_overrideable_ops.h>
+
+namespace at {
+
+
+// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask);
+  }
+}
+
+// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable_symint(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+  }
+}
+
+// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::convolution_backward_overrideable_out::call(grad_output, input, weight, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::convolution_backward_overrideable_out::call(grad_output, input, weight, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2);
+  }
+}
+
+// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::convolution_backward_overrideable_out::call(grad_output, input, weight, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::convolution_backward_overrideable_out::call(grad_output, input, weight, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, output_mask, out0, out1, out2);
+  }
+}
+
+// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::convolution_backward_overrideable_out::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::convolution_backward_overrideable_out::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
+  }
+}
+
+// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_symint_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::convolution_backward_overrideable_out::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+    return at::_ops::convolution_backward_overrideable_out::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2);
+  }
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/copy_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8822660993a20651df2328825c579cb84e54e992
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & copy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false);
+TORCH_API at::Tensor & copy_outf(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out);
+TORCH_API at::Tensor & copy_(at::Tensor & self, const at::Tensor & src, bool non_blocking=false);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cross_entropy_loss_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cross_entropy_loss_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8854b9b9e40a6d077130ac24f95fefbe4dbbbf58
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cross_entropy_loss_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API cross_entropy_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, c10::SymInt, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cross_entropy_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b43360ccf40709a703260b907ce6f53e0d34e2bb --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
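The `cross_entropy_loss` schema above carries its Python-visible defaults (reduction=Mean, ignore_index=-100, label_smoothing=0.0); user code normally goes through the generated `at::cross_entropy_loss` wrapper rather than calling `at::_ops::cross_entropy_loss::call` directly. A hedged sketch, with tensor shapes that are ours:

#include <ATen/ATen.h>

int main() {
  at::Tensor logits = at::randn({4, 10});
  at::Tensor target = at::randint(/*high=*/10, {4}, at::kLong);
  at::Tensor loss = at::cross_entropy_loss(logits, target);  // schema defaults apply
  return 0;
}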
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & crow_indices_copy_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & crow_indices_copy_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a2f27b8d988700d8af4fe296113d18d2ddbe3e64 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_cumprod_out : public at::meta::structured_cumprod { +void impl(const at::Tensor & self, int64_t dim, ::std::optional dtype, const at::Tensor & out); +}; +TORCH_API at::Tensor cumprod(const at::Tensor & self, at::Dimname dim, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & cumprod_out(const at::Tensor & self, at::Dimname dim, ::std::optional dtype, at::Tensor & out); +TORCH_API at::Tensor & cumprod_(at::Tensor & self, at::Dimname dim, ::std::optional dtype=::std::nullopt); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/einsum_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/einsum_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ade5e8c4e559cf87585e0b3a56261bafa7b66411 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/einsum_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor einsum(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path=::std::nullopt); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..92420e3c34efc509ea881207be885c5d4a80d65a --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
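`einsum` in the native header above takes the equation string, a tensor list, and an optional precomputed contraction `path`. A minimal sketch of the common call, assuming a standard LibTorch build:

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::rand({2, 3});
  at::Tensor b = at::rand({3, 4});
  at::Tensor c = at::einsum("ij,jk->ik", {a, b});  // matrix product; default path
  return 0;
}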
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout_native.h new file mode 100644 index 0000000000000000000000000000000000000000..df6dc55c53ed3a09d622546e59496f1a196cd921 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor feature_dropout(const at::Tensor & input, double p, bool train); +TORCH_API at::Tensor & feature_dropout_(at::Tensor & self, double p, bool train); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfftn_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfftn_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bded18b232645a8fe518588989ce0e289df474f3 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfftn_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_hfftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt); +TORCH_API const at::Tensor & fft_hfftn_symint_out(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm, const at::Tensor & out); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fix_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fix_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2cab32804671df5c0aff98604b83b6a5d7e7114c --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fix_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
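`feature_dropout` above zeroes whole feature maps (channels) rather than individual elements, which is why it is declared separately from plain dropout; `feature_dropout_` mutates `self`. A sketch with made-up (N, C, L) sizes:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::rand({8, 16, 32});                              // (N, C, L)
  at::Tensor y = at::feature_dropout(x, /*p=*/0.5, /*train=*/true);  // drops entire channels
  return 0;
}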
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API fix { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fix") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fix(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API fix_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fix_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fix_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API fix_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fix") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2a7689c34f2e206bbe5a4565d0fb10ffcf1c281b --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/gcd_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
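The three `fix` structs above are the usual functional/in-place/out trio for what is a NumPy-style alias of `trunc`. For illustration (the values are ours):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::rand({3}) * 10.0 - 5.0;
  at::Tensor i = at::fix(t);  // truncate toward zero; alias of at::trunc
  t.fix_();                   // in-place variant
  return 0;
}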
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor gcd(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f0e4c91614cb3efd3276c809bf44f3cd17dca6fd --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, ::std::optional generator=::std::nullopt); + +} // namespace cpu +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2c5086c562e8db26edc7bf88b72c1f4f05695933 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hsplit_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::vector hsplit(const at::Tensor & self, int64_t sections); +TORCH_API ::std::vector hsplit(const at::Tensor & self, at::IntArrayRef indices); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c040c728a022c82d6495c6102fe9424e176b9fc3 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
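The `gcd_out`/`gcd_outf` pair at the top of this header shows the torchgen naming rule used throughout these files: `_out` puts the output tensor first, `_outf` keeps the schema order with the output last; both fill the same buffer. A sketch with integral inputs, as `gcd` requires (values are ours):

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::arange(2, 8, at::kLong);
  at::Tensor b = at::full({6}, 4, at::kLong);
  at::Tensor out = at::empty_like(a);
  at::gcd_out(out, a, b);   // out parameter leads
  at::gcd_outf(a, b, out);  // out parameter trails
  return 0;
}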
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & huber_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +TORCH_API at::Tensor & huber_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..320b0b3887a4698ce6e5eb634f4f5a0cc9c78130 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API huber_loss_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::huber_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); +}; + +struct TORCH_API huber_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::huber_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a090ebcd807b51431038fbc01ec2ec46986aaafa --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hypot(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..96655ef264ca07e10bef72eec204ed8b376ec8b1 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_ops.h @@ -0,0 +1,127 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
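`huber_loss` above defaults to `reduction=Mean` and `delta=1.0`; the `int reduction` slot takes the constants from `at::Reduction`. A minimal sketch (shapes are ours):

#include <ATen/ATen.h>
#include <ATen/core/Reduction.h>

int main() {
  at::Tensor pred = at::rand({5});
  at::Tensor target = at::rand({5});
  at::Tensor loss = at::huber_loss(pred, target, at::Reduction::Mean, /*delta=*/1.0);
  return 0;
}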
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_fill__int_Scalar { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill_int_Scalar { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill__int_Tensor { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill_.int_Tensor(Tensor(a!) 
self, int dim, Tensor index, Tensor value) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill_int_Tensor { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill__Dimname_Scalar { + using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname_Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill__Dimname_Tensor { + using schema = at::Tensor & (at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill_.Dimname_Tensor(Tensor(a!) 
self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill_Dimname_Scalar { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname_Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value); +}; + +struct TORCH_API index_fill_Dimname_Tensor { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value); +}; + +struct TORCH_API index_fill_int_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); +}; + +struct TORCH_API index_fill_int_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_fill") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a2ae14581517ba37e337fa89f41500694d221020 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/index_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor index(const at::Tensor & self, const c10::List<::std::optional> & indices); +TORCH_API at::Tensor & index_out(at::Tensor & out, const at::Tensor & self, const c10::List<::std::optional> & indices); +TORCH_API at::Tensor & index_outf(const at::Tensor & self, const c10::List<::std::optional> & indices, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_native.h new file mode 100644 index 0000000000000000000000000000000000000000..09905031415aedcf7538584d738d16e69361d57f --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_pinned_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool is_pinned(const at::Tensor & self, ::std::optional device=::std::nullopt); +TORCH_API bool is_pinned_sparse_coo(const at::Tensor & self, ::std::optional device=::std::nullopt); +TORCH_API bool is_pinned_sparse_compressed(const at::Tensor & self, ::std::optional device=::std::nullopt); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed.h new file mode 100644 index 0000000000000000000000000000000000000000..8bbc6acc8f6b79810dbc16e9de2a3b8d349b20ad --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/is_signed.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_signed(Tensor self) -> bool +inline bool __dispatch_is_signed(const at::Tensor & self) { + return at::_ops::is_signed::call(self); +} + +} diff --git 
a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..7ec36cb3cd0de71e55c3598eaaba6553c7ff3cba --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_isneginf : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..5e37440c0bfe1a4071c5086c0bad3293b2052a77 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input); +} +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) { + return at::_ops::leaky_relu_backward_grad_input::call(grad_output, self, negative_slope, self_is_result, grad_input); +} + +// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor +inline at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result); +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..597eb6e7ceacf69e44ecb822adb50311ae0c1d7b --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + +} // namespace cpu +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d1cbc3acb5c01b08257e44e60e28972a9c34a0a9 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + +} // namespace meta +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..880e837780f96ec1f17ef01209125f12d24c47be --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvalsh_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
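`linalg_cholesky_ex` above returns an `(L, info)` pair; with `check_errors=false` a failed factorization is reported through `info` instead of throwing. A sketch, where the positive-definite construction is ours:

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor a = at::rand({3, 3});
  at::Tensor spd = at::mm(a, a.t()) + at::eye(3) * 3.0;  // make it positive definite
  auto result = at::linalg_cholesky_ex(spd, /*upper=*/false, /*check_errors=*/false);
  at::Tensor L = std::get<0>(result);     // Cholesky factor
  at::Tensor info = std::get<1>(result);  // 0 on success
  return 0;
}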
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_eigvalsh { + using schema = at::Tensor (const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_eigvalsh") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_eigvalsh(Tensor self, str UPLO=\"L\") -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::string_view UPLO); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO); +}; + +struct TORCH_API linalg_eigvalsh_out { + using schema = at::Tensor & (const at::Tensor &, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_eigvalsh") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_eigvalsh.out(Tensor self, str UPLO=\"L\", *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::string_view UPLO, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h new file mode 100644 index 0000000000000000000000000000000000000000..06ee5ac8644d6aa909a6f4b8e22547c5bbdddc1e --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_linalg_ldl_solve_out : public at::meta::structured_linalg_ldl_solve { +void impl(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_relu.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_relu.h new file mode 100644 index 0000000000000000000000000000000000000000..214a44bd738cfaf96ea7b63322dd2ddb2ce9b177 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_relu.h @@ -0,0 +1,47 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor +inline at::Tensor miopen_convolution_relu(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::miopen_convolution_relu::call(self, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups); +} +namespace symint { + template ::value>> + at::Tensor miopen_convolution_relu(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::miopen_convolution_relu::call(self, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups); + } +} + +// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor +inline at::Tensor miopen_convolution_relu_symint(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::miopen_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups); +} +namespace symint { + template ::value>> + at::Tensor miopen_convolution_relu(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) { + return at::_ops::miopen_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups); + } +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mm_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mm_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..9fb38ed0017c03916617b72e46c40d80d9338c65 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/mm_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_mm : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, const at::Tensor & mat2); +}; + +} // namespace meta +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/multinomial_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/multinomial_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ca5192365b1bdc0eef82e606070ba156329002b0 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/multinomial_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor multinomial(const at::Tensor & self, int64_t num_samples, bool replacement=false, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & multinomial_out(const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional generator, at::Tensor & out); +} //
namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nansum_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nansum_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d5a914ce2c69bb76dd17a2f69cc8bd09bd9301dd --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/nansum_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API nansum { + using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nansum") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype); +}; + +struct TORCH_API nansum_out { + using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nansum") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9acc93e36a7bb4e031f6ebbc72eff0d4ce8df706 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple batch_norm_cpu(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps); +TORCH_API ::std::tuple batch_norm_cpu_out(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd); +TORCH_API ::std::tuple batch_norm_cuda(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps); +TORCH_API ::std::tuple batch_norm_cuda_out(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd); +TORCH_API ::std::tuple mkldnn_batch_norm(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps); +} // namespace native +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f789b7865dfe5a2b547788b83c0c12630759781b --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
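`nansum` above sums as if every NaN were zero; `dim` is optional and `dtype` can widen the accumulator. A sketch (the planted NaN is ours):

#include <ATen/ATen.h>
#include <ATen/TensorIndexing.h>
#include <limits>

int main() {
  at::Tensor x = at::rand({2, 3});
  x.index_put_({0, 0}, std::numeric_limits<double>::quiet_NaN());
  at::Tensor total = at::nansum(x);      // NaN contributes 0
  at::Tensor rows = at::nansum(x, {1});  // reduce over dim 1
  return 0;
}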
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor pixel_shuffle(const at::Tensor & self, int64_t upscale_factor); + +} // namespace cpu +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a7959a1545cca5ce4b912363f2bb60c0dc5600c1 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_max_pool2d_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & quantized_max_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +TORCH_API at::Tensor & quantized_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like.h new file mode 100644 index 0000000000000000000000000000000000000000..54503c40c43d4c04de2901bc7747cc429940cd9f --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/randn_like.h @@ -0,0 +1,43 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +inline at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options={}, ::std::optional memory_format=::std::nullopt) { + return at::_ops::randn_like::call(self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); +} +// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +inline at::Tensor randn_like(const at::Tensor & self, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, ::std::optional memory_format) { + return at::_ops::randn_like::call(self, dtype, layout, device, pin_memory, memory_format); +} + +// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & randn_like_out(at::Tensor & out, const at::Tensor & self, ::std::optional memory_format=::std::nullopt) { + return at::_ops::randn_like_out::call(self, memory_format, out); +} +// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_like_outf(const at::Tensor & self, ::std::optional memory_format, at::Tensor & out) { + return at::_ops::randn_like_out::call(self, memory_format, out); +} + +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/random_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/random_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c632c7d5aa0c6d3b518035f4037c2d25b25fa4da --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/random_compositeexplicitautograd_dispatch.h @@ -0,0 +1,31 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor random(const at::Tensor & self, int64_t from, ::std::optional to, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t from, ::std::optional to, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & random_outf(const at::Tensor & self, int64_t from, ::std::optional to, ::std::optional generator, at::Tensor & out); +TORCH_API at::Tensor random(const at::Tensor & self, int64_t to, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t to, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & random_outf(const at::Tensor & self, int64_t to, ::std::optional generator, at::Tensor & out); +TORCH_API at::Tensor random(const at::Tensor & self, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & random_outf(const at::Tensor & self, ::std::optional generator, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..699369aed94ddb21b8afc3739d5d45d5aa56fc99 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
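The two `randn_like` overloads above follow the standard factory pattern: one takes a bundled `at::TensorOptions`, the other the unpacked dtype/layout/device/pin_memory fields. A short sketch, assuming a standard LibTorch build:

#include <ATen/ATen.h>

int main() {
  at::Tensor ref = at::rand({2, 2});
  at::Tensor n1 = at::randn_like(ref);               // inherits ref's options
  at::Tensor n2 = at::randn_like(ref, at::kDouble);  // TensorOptions overload
  return 0;
}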
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API reflection_pad1d_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reflection_pad1d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); +}; + +struct TORCH_API reflection_pad1d { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reflection_pad1d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef padding); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding); +}; + +}} // namespace at::_ops diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1dedeba48290706ff10dcfe0a0a7a3c31bd8e65c --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor repeat_interleave(const at::Tensor & repeats, ::std::optional<int64_t> output_size=::std::nullopt);
+TORCH_API at::Tensor repeat_interleave_symint(const at::Tensor & repeats, ::std::optional<c10::SymInt> output_size=::std::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1fc51bd7069ce09217dff0fc872e2c2acb9c03f2
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/replication_pad1d_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_replication_pad1d_out_cpu : public at::meta::structured_replication_pad1d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & out);
+};
+struct TORCH_API structured_replication_pad1d_out_cuda : public at::meta::structured_replication_pad1d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d8580ae152672c2702bd2f57a095092f8289a581
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor row_indices(const at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_compositeexplicitautogradnonfunctional_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..025e422eb2f8eb1b8060a585cce090551727864c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1);
+TORCH_API at::Tensor slow_conv_transpose2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1));
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..79db3964d4cc831551a4f44a00a07493b2a66881
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_legendre_polynomial_p_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_legendre_polynomial_p_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d03401eea9cc72a757bbbdc8a6ac0bda97f4556
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_modified_bessel_k0 {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_modified_bessel_k0")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_modified_bessel_k0(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API special_modified_bessel_k0_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_modified_bessel_k0")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..56bb86216b37b67225000ae99e652919f75b38bf
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API c10::SymInt sym_storage_offset(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_backward_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c8817c5667b9f0b77bcb9b3ee7e731947bfb9ea
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/to_mkldnn_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor to_mkldnn_backward(const at::Tensor & grad, const at::Tensor & input);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/trapz.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/trapz.h
new file mode 100644
index 0000000000000000000000000000000000000000..7546724b412b034ec4b4d35ddd9116e2f865878b
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/trapz.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/trapz_ops.h>
+
+namespace at {
+
+
+// aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
+inline at::Tensor trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) {
+    return at::_ops::trapz_x::call(y, x, dim);
+}
+
+// aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
+inline at::Tensor trapz(const at::Tensor & y, double dx=1, int64_t dim=-1) {
+    return at::_ops::trapz_dx::call(y, dx, dim);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_meta.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..8a98e713625b350f4e40e453f24795bd052eaed1
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_upsample_bilinear2d : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w);
+};
+
+} // namespace meta
+} // namespace at
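
Usage sketch (illustrative only, not part of the generated headers above): the declarations in ATen/ops/trapz.h and the generated out= variants follow the standard ATen calling conventions, so a translation unit linked against libtorch can exercise them roughly as below. The umbrella include ATen/ATen.h, the tensor values, and the call to at::special_modified_bessel_k0_out are assumptions of this sketch, not part of the diff.

#include <ATen/ATen.h>

int main() {
  // aten::trapz.x — trapezoidal integration of y over an explicit x grid
  // (dim selects the dimension to integrate over; -1 is the default).
  at::Tensor x = at::linspace(0.1, 1.0, 100);
  at::Tensor y = x * x;
  at::Tensor area = at::trapz(y, x, /*dim=*/-1);

  // aten::trapz.dx — same integral expressed with a uniform sample
  // spacing instead of an explicit grid (0.9 / 99 for the tensor above).
  at::Tensor area_dx = at::trapz(y, /*dx=*/0.9 / 99, /*dim=*/-1);

  // Out-variant convention used throughout these headers: the "_out"
  // form takes the destination tensor first, the "_outf" form takes it
  // last (assumed here for special_modified_bessel_k0, cf. its _ops.h).
  at::Tensor out = at::empty_like(x);
  at::special_modified_bessel_k0_out(out, x);
  return 0;
}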