diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_cuda_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dafda977b01f8790bb712603ce7af3259d1f1573 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API at::Tensor _adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size); + +} // namespace cuda +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_clear_plan_cache_compositeimplicitautograd_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_clear_plan_cache_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..72d5f19e897c355777bd05526b2636843b095493 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_clear_plan_cache_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API void _cufft_clear_plan_cache(at::DeviceIndex device_index); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_sparse_backward_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_sparse_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..332d231002c1403f4590925e221b1d0463c0d060 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_sparse_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _embedding_bag_sparse_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const ::std::optional & per_sample_weights, int64_t padding_idx=-1); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3acc572e9eb25cd6f76cd6792d95ed05bec53898 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_cos_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::vector foreach_tensor_cos_slow(at::TensorList self); +TORCH_API void _foreach_cos_out(at::TensorList self, at::TensorList out); +TORCH_API void foreach_tensor_cos_slow_(at::TensorList self); +TORCH_API ::std::vector foreach_tensor_cos_cuda(at::TensorList self); +TORCH_API void foreach_tensor_cos_cuda_(at::TensorList self); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ea4e1131c7b8058aafe8a9d3c7a6e054bfc0e6a1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _foreach_floor { + using schema = ::std::vector (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_floor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_floor(Tensor[] self) -> Tensor[]") + static ::std::vector call(at::TensorList self); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_floor_ { + using schema = void (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_floor_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_floor_(Tensor(a!)[] self) -> ()") + static void call(at::TensorList self); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_floor_out { + using schema = void (at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_floor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_cpu_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ec6e5481eae2f8a2ef8cb37dbec58f22669767d8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps); +TORCH_API ::std::tuple _native_batch_norm_legit_outf(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd); +TORCH_API ::std::tuple _native_batch_norm_legit(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, at::Tensor & running_mean, at::Tensor & running_var, bool training, double momentum, double eps); +TORCH_API ::std::tuple _native_batch_norm_legit(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, bool training, double momentum, double eps); +TORCH_API ::std::tuple _native_batch_norm_legit_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, bool training, double momentum, double eps); +TORCH_API ::std::tuple _native_batch_norm_legit_outf(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_select_backward_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_select_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4d462bd1343adc149e71b870621d9468bc4e9a99 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_select_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _nested_select_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsr_tensor_unsafe.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsr_tensor_unsafe.h new file mode 100644 index 0000000000000000000000000000000000000000..104ccac7d2e0ac8d60e875c80ac555dbf6f570f7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsr_tensor_unsafe.h @@ -0,0 +1,34 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_bsr_tensor_unsafe::call(crow_indices, col_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::_sparse_bsr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); +} + +} diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0672af2c7d3653a9824658c6c5d5fb23014f12b3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor +inline at::Tensor _test_autograd_multiple_dispatch(const at::Tensor & self) { + return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self); +} + +// aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor +inline at::Tensor _test_autograd_multiple_dispatch(const at::Tensor & self, bool b) { + return at::_ops::_test_autograd_multiple_dispatch_ntonly::call(self, b); +} + +// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _test_autograd_multiple_dispatch_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::call(self, out); +} +// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _test_autograd_multiple_dispatch_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::call(self, out); +} + +} diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_cuda_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cda1d5ee45e93daa7679890de6267cd5e452d1b8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor _upsample_bicubic2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional scales_h, ::std::optional scales_w, at::Tensor & grad_input); +TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional scales_h, ::std::optional scales_w, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_compositeimplicitautograd_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e559d5f7bd0f9077d0e94583623b01e13c994180 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need 
are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor _upsample_nearest_exact3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, ::std::optional> scale_factors); +TORCH_API at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, ::std::optional> scale_factors); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_cpu_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6cdcb2994132f695321115ed3e65a799f2e8391d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor asinh(const at::Tensor & self); +TORCH_API at::Tensor & asinh_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & asinh_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & asinh_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/atanh_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/atanh_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5d2e101a30d901bd6772ccf8069f7b1513b49294 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/atanh_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_atanh_out : public at::meta::structured_atanh { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor atanh_sparse(const at::Tensor & self); +TORCH_API at::Tensor & atanh_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & atanh_sparse_(at::Tensor & self); +TORCH_API at::Tensor atanh_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & atanh_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & atanh_sparse_csr_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3e99ca612b27e6b66774fd93f11ab1f75923382a --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple _new_batch_norm_backward_cpu(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_var, bool update, double eps, ::std::array output_mask, const at::Tensor & reserve); +TORCH_API ::std::tuple _new_batch_norm_backward_cuda(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_var, bool update, double eps, ::std::array output_mask, const at::Tensor & reserve); +TORCH_API ::std::tuple _new_batch_norm_backward_mkldnn(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_var, bool update, double eps, ::std::array output_mask, const at::Tensor & reserve); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f22ff8d4b3530369def1a5d23ae7313f9001e400 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple batch_norm_stats_out(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1); +TORCH_API ::std::tuple batch_norm_stats_cuda(const at::Tensor & input, double eps); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_compositeexplicitautogradnonfunctional_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0248ca6be11f0bd3b23c1c5478820df6c4267197 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor bernoulli(const at::Tensor & self, double p, ::std::optional generator=::std::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9e1b93c4c9d836a607bc6e40cf211328b95eb996 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor binary_cross_entropy_with_logits(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight={}, const ::std::optional & pos_weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & binary_cross_entropy_with_logits_out(const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, const ::std::optional & pos_weight, int64_t reduction, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_cpu_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fa3eb479f702a4115e4c88e3e82da33ac146deb3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor bitwise_and(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & bitwise_and_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2cdb117a6dfa8ef515d349fd61dca62f0bd9b831 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_bitwise_and_out : public at::meta::structured_bitwise_and_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor bitwise_and(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & bitwise_and_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & bitwise_and_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor bitwise_and(const at::Scalar & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_and_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_left_shift_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_left_shift_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8e7746535ec8165ee6ceab1c999f4675f5502da4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_left_shift_ops.h @@ -0,0 +1,105 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API bitwise_left_shift_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API bitwise_left_shift__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API bitwise_left_shift_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API bitwise_left_shift_Tensor_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API bitwise_left_shift__Tensor_Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift_.Tensor_Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API bitwise_left_shift_Tensor_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API bitwise_left_shift_Scalar_Tensor { + using schema = at::Tensor (const at::Scalar &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor") + static at::Tensor call(const at::Scalar & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other); +}; + +struct TORCH_API bitwise_left_shift_Scalar_Tensor_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_meta_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5d78bf5906c610635664f99c8ec8e9c94c6c0811 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor bitwise_not(const at::Tensor & self); +TORCH_API at::Tensor & bitwise_not_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & bitwise_not_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & bitwise_not_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_meta_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3abf2e8d9d832f19026e66aa85d8a2d7de40c8d3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2); +TORCH_API at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2); +TORCH_API at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max.h new file mode 100644 index 0000000000000000000000000000000000000000..b415737498827033cb6c5142592b5c795759ebef --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max.h @@ -0,0 +1,63 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::clamp_max(Tensor self, Scalar max) -> Tensor +inline at::Tensor clamp_max(const at::Tensor & self, const at::Scalar & max) { + return at::_ops::clamp_max::call(self, max); +} + +// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor +inline at::Tensor clamp_max(const at::Tensor & self, const at::Tensor & max) { + return at::_ops::clamp_max_Tensor::call(self, max); +} + +// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) +inline at::Tensor & clamp_max_(at::Tensor & self, const at::Scalar & max) { + return at::_ops::clamp_max_::call(self, max); +} + +// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!) +inline at::Tensor & clamp_max_(at::Tensor & self, const at::Tensor & max) { + return at::_ops::clamp_max__Tensor::call(self, max); +} + +// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & max) { + return at::_ops::clamp_max_out::call(self, max, out); +} +// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Scalar & max, at::Tensor & out) { + return at::_ops::clamp_max_out::call(self, max, out); +} + +// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & max) { + return at::_ops::clamp_max_Tensor_out::call(self, max, out); +} +// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Tensor & max, at::Tensor & out) { + return at::_ops::clamp_max_Tensor_out::call(self, max, out); +} + +} diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8249f6b9417781cfba82b290c67a7788d17e9d50 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API clamp_max { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max(Tensor self, Scalar max) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & max); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max); +}; + +struct TORCH_API clamp_max_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max.Tensor(Tensor self, Tensor max) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & max); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max); +}; + +struct TORCH_API clamp_max_ { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max_(Tensor(a!) 
self, Scalar max) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & max); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & max); +}; + +struct TORCH_API clamp_max__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & max); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & max); +}; + +struct TORCH_API clamp_max_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & max, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max, at::Tensor & out); +}; + +struct TORCH_API clamp_max_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & max, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/clone_compositeexplicitautograd_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/clone_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1654ad22976341b2657f403d41f16f717b816a43 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/clone_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor clone(const at::Tensor & self, ::std::optional memory_format=::std::nullopt); +TORCH_API at::Tensor & clone_out(at::Tensor & out, const at::Tensor & self, ::std::optional memory_format=::std::nullopt); +TORCH_API at::Tensor & clone_outf(const at::Tensor & self, ::std::optional memory_format, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_overrideable_compositeexplicitautograd_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_overrideable_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..12dce814d45553f1f1c2b34c8489f5a28ec9268f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/convolution_backward_overrideable_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple convolution_backward_overrideable(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask); +TORCH_API ::std::tuple convolution_backward_overrideable_symint(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask); +TORCH_API ::std::tuple convolution_backward_overrideable_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask); +TORCH_API ::std::tuple convolution_backward_overrideable_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +TORCH_API ::std::tuple convolution_backward_overrideable_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask); +TORCH_API ::std::tuple convolution_backward_overrideable_symint_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef 
stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_cuda_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..32c2a9b1106d73f0ea7c20be136feaf400543bef --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/count_nonzero_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor count_nonzero(const at::Tensor & self, at::IntArrayRef dim); + +} // namespace cuda +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeimplicitautograd_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0b522404cf7975c19a5f2cbdb1ff504a5dd83b43 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple cummin(const at::Tensor & self, at::Dimname dim); +TORCH_API ::std::tuple cummin_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim); +TORCH_API ::std::tuple cummin_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/deg2rad_compositeexplicitautograd_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/deg2rad_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..26458e6b05ed233ed19076ce3b5f7909070903a7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/deg2rad_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor deg2rad(const at::Tensor & self); +TORCH_API at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & deg2rad_(at::Tensor & self); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/div.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/div.h new file mode 100644 index 0000000000000000000000000000000000000000..a8ef85c995aa830984fb8ac6d1937169d64faaa8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/div.h @@ -0,0 +1,81 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::div.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor div(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::div_Tensor::call(self, other); +} + +// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::div_out::call(self, other, out); +} +// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::div_out::call(self, other, out); +} + +// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor +inline at::Tensor div(const at::Tensor & self, const at::Tensor & other, ::std::optional rounding_mode) { + return at::_ops::div_Tensor_mode::call(self, other, rounding_mode); +} + +// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, ::std::optional rounding_mode) { + return at::_ops::div_out_mode::call(self, other, rounding_mode, out); +} +// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, ::std::optional rounding_mode, at::Tensor & out) { + return at::_ops::div_out_mode::call(self, other, rounding_mode, out); +} + +// aten::div.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor div(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::div_Scalar::call(self, other); +} + +// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor +inline at::Tensor div(const at::Tensor & self, const at::Scalar & other, ::std::optional rounding_mode) { + return at::_ops::div_Scalar_mode::call(self, other, rounding_mode); +} + +// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::div_Scalar_out::call(self, other, out); +} +// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & div_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::div_Scalar_out::call(self, other, out); +} + +// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, ::std::optional rounding_mode) { + return at::_ops::div_Scalar_mode_out::call(self, other, rounding_mode, out); +} +// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & div_outf(const at::Tensor & self, const at::Scalar & other, ::std::optional rounding_mode, at::Tensor & out) { + return at::_ops::div_Scalar_mode_out::call(self, other, rounding_mode, out); +} + +} diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/eq_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/eq_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..070bbb84aa32641332af478f3843039fbc0dc429 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/eq_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API eq__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eq_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eq_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API eq__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eq_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API eq_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eq") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +struct TORCH_API eq_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eq") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eq.Scalar(Tensor self, Scalar other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API eq_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eq") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API eq_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eq") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eq.Tensor(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfftn_compositeimplicitautograd_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfftn_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dd7354b7d4b9af387d64e1c61737edea95d2c832 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfftn_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor fft_rfftn(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt); +TORCH_API at::Tensor fft_rfftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt); +TORCH_API at::Tensor & fft_rfftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt); +TORCH_API at::Tensor & fft_rfftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm, at::Tensor & out); +TORCH_API at::Tensor & fft_rfftn_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::OptionalIntArrayRef dim=::std::nullopt, ::std::optional norm=::std::nullopt); +TORCH_API at::Tensor & fft_rfftn_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, ::std::optional norm, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cuda_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9332df138892de6340f2da918ec6683dae7eef08 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & fill_(at::Tensor & self, const at::Scalar & value); +TORCH_API at::Tensor & fill_(at::Tensor & self, const at::Tensor & value); + +} // namespace cuda +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d80820903456f754bc3eea590f72e128a2521a96 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_dense_tensors_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor flatten_dense_tensors(at::TensorList tensors); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fmax_meta.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fmax_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..20492b854dc7970c97060c4e362b5967f98efd18 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fmax_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_fmax : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0164cea021ffb61e6e6caf0e4922c7d9618f8614 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fractional_max_pool2d_output { + using schema = ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fractional_max_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "output") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); +}; + +struct TORCH_API fractional_max_pool2d { + using schema = ::std::tuple (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fractional_max_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cpu_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..02a82c860f24c22aadf10f242d172fdd9bfc2b37 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool3d_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple fractional_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool3d_out(at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples); +TORCH_API ::std::tuple fractional_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a4afd84d9678136255beda99578984648aecf7c2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/frobenius_norm_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & frobenius_norm_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..819e120a26567a7bc6bb961cadca5b7e1efb3816 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd); +TORCH_API at::Tensor & hardshrink_backward_outf(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_cpu_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5bfedcad5b6c885e68c1510406d04f33bccdb66a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..0143b83c33e210fc5760954953a382486640e572 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_igammac : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a3e479bdccd111a7f1d336dda768241b9c8919bf --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API igammac_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::igammac") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API igammac { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::igammac") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "igammac(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API igammac_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::igammac_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_cpu_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4080458b80553ba8663d14ea25b396020aac9d23 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_qr_cuda_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_qr_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cf3c70bbfafbdef0b0079d0abb42bc552af119f2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_qr_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple linalg_qr(const at::Tensor & A, c10::string_view mode="reduced"); +TORCH_API ::std::tuple linalg_qr_out(at::Tensor & Q, at::Tensor & R, const at::Tensor & A, c10::string_view mode="reduced"); +TORCH_API ::std::tuple linalg_qr_outf(const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R); + +} // namespace cuda +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b11870074926948d4321dc3b88378d4936fcd3f7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_solve_triangular(const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false); +TORCH_API at::Tensor & linalg_solve_triangular_out(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_tensorinv_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_tensorinv_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b71771dad48bd3997baee02183432c38e43ee1c6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_tensorinv_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from 
NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor linalg_tensorinv(const at::Tensor & self, int64_t ind=2); +TORCH_API at::Tensor & linalg_tensorinv_out(const at::Tensor & self, int64_t ind, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..a1d9f14d3e2afacdcda8ef602ba54a16998d2a6b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) +inline ::std::tuple native_batch_norm(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps) { + return at::_ops::native_batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps); +} + +// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple native_batch_norm_out(at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps) { + return at::_ops::native_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd); +} +// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) 
save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple native_batch_norm_outf(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) { + return at::_ops::native_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd); +} + +} diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a484a41b8227c556f729629582b2836beb9fc31f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API native_batch_norm { + using schema = ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_batch_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps); +}; + +struct TORCH_API native_batch_norm_out { + using schema = ::std::tuple (const at::Tensor &, const ::std::optional &, const ::std::optional &, const ::std::optional &, const ::std::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_batch_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) 
save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))") + static ::std::tuple call(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/ne_cuda_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/ne_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f4847a150f978cc90217b44533e962d4567a7aac --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/ne_cuda_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor ne(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & ne_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & ne_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor ne(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & ne_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & ne_(at::Tensor & self, const at::Tensor & other); + +} // namespace cuda +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/new_full_compositeexplicitautograd_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/new_full_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f42346c2784709898d11f9b239dff9c51b012576 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/new_full_compositeexplicitautograd_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor new_full(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}); +TORCH_API at::Tensor new_full(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor new_full_symint(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}); +TORCH_API at::Tensor new_full_symint(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & new_full_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value); +TORCH_API at::Tensor & new_full_outf(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); +TORCH_API at::Tensor & new_full_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value); +TORCH_API at::Tensor & new_full_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautogradnonfunctional_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..71456b97c5c3c871e009f80cf4250402efd76dfa --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor pixel_shuffle(const at::Tensor & self, int64_t upscale_factor); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/random_cpu_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/random_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..32f559fb7ede018c4e9e7f229c8eec4eaaa8d157 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/random_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & random_(at::Tensor & self, int64_t from, ::std::optional to, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & random_(at::Tensor & self, int64_t to, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & random_(at::Tensor & self, ::std::optional generator=::std::nullopt); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1f92f978ab7381fa48d6fc0ba265a580444de53a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/reciprocal_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API reciprocal { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reciprocal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reciprocal(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API reciprocal_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reciprocal_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reciprocal_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API reciprocal_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reciprocal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reciprocal.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/resize_meta_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/resize_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..532b697f5437fe0baba0d3d116cb6a516c3cb86c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/resize_meta_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, ::std::optional memory_format=::std::nullopt); +TORCH_API const at::Tensor & resize__symint(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional memory_format=::std::nullopt); + +} // namespace meta +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices.h new file mode 100644 index 0000000000000000000000000000000000000000..74407abcc23b1b9c538506e335b85c9f14919cbe --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/row_indices.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/rsub_cuda_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/rsub_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4d4b1869672c3d5a60321f426b23293f334fb4a5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/rsub_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor rsub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); + +} // namespace cuda +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_meta.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..25a64b5ca538d9cb855cdbff3018728187cd6868 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_meta.h @@ -0,0 +1,42 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_scatter_src : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src); +}; +struct TORCH_API structured_scatter_value : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +}; +struct TORCH_API structured_scatter_reduce : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce); +}; +struct TORCH_API structured_scatter_value_reduce : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce); +}; + +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/silu_backward_cpu_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/silu_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dd254bf37ebfe3b285cf199aa223434d9360615a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/silu_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor silu_backward(const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & silu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & silu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated3d_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b646459ddcc0222146e46ded05744f71311246bc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated3d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API slow_conv_dilated3d { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, const ::std::optional &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::slow_conv_dilated3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "slow_conv_dilated3d(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); +}; + +struct TORCH_API slow_conv_dilated3d_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, const ::std::optional &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::slow_conv_dilated3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "slow_conv_dilated3d.out(Tensor self, Tensor weight, SymInt[3] kernel_size, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_compositeexplicitautograd_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..895c77d94442d5616559d79d1c724bc6b0d121a1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor sparse_coo_tensor(at::IntArrayRef size, at::TensorOptions options); +TORCH_API at::Tensor sparse_coo_tensor(at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & sparse_coo_tensor_out(at::Tensor & out, at::IntArrayRef size); +TORCH_API at::Tensor & sparse_coo_tensor_outf(at::IntArrayRef size, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_sampled_addmm.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_sampled_addmm.h new file mode 100644 index 0000000000000000000000000000000000000000..759c3978c68561f7cd3b95ca7a03de61b94b9ca9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_sampled_addmm.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & sparse_sampled_addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sparse_sampled_addmm_out::call(self, mat1, mat2, beta, alpha, out); +} +// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & sparse_sampled_addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::sparse_sampled_addmm_out::call(self, mat1, mat2, beta, alpha, out); +} + +// aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor sparse_sampled_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sparse_sampled_addmm::call(self, mat1, mat2, beta, alpha); +} + +} diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fa2622a7d8f60140cac3d3be971e9cb7ed65069d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_airy_ai_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_special_airy_ai_out : public at::meta::structured_special_airy_ai { +void impl(const at::Tensor & x, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..669760680ecd229e35860a74c86cb8f5b992e0c0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_i1 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_i1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_i1(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_i1_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_i1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_i1.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p_native.h new file mode 100644 index 0000000000000000000000000000000000000000..52e6ab859521a5b259df3eb49dde948a3188c2d5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor special_log1p(const at::Tensor & self); +TORCH_API at::Tensor & special_log1p_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_cpu_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6f57a5360904b20817e617360fb7c38ffba83001 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_modified_bessel_i0(const at::Tensor & self); +TORCH_API at::Tensor & special_modified_bessel_i0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_modified_bessel_i0_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtr_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..34ff507bf09b82a9c8a0b6d4ae5df7ecc3ef49a4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtr_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_ndtr {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_ndtr")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_ndtr(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API special_ndtr_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_ndtr")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/subtract_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/subtract_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d8b2c01bdbb22cc90a11d18f6bd8ddac119e434d
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/subtract_ops.h
@@ -0,0 +1,72 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API subtract_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::subtract")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
+};
+
+struct TORCH_API subtract_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::subtract")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha);
+};
+
+struct TORCH_API subtract__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::subtract_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha);
+};
+
+struct TORCH_API subtract_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::subtract")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha);
+};
+
+struct TORCH_API subtract__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::subtract_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha);
+};
+
+}} // namespace at::_ops
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/tensordot_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/tensordot_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..dab4bd136dc56f166bd44f6d6b2b920da4539aa0
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/tensordot_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor tensordot(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other);
+TORCH_API at::Tensor & tensordot_out(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..464203bad925381ca42b4925be2d2acd88d7ea0f
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_bilinear2d_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_upsample_bilinear2d_backward_out_cpu : public at::meta::structured_upsample_bilinear2d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_upsample_bilinear2d_backward_out_cuda : public at::meta::structured_upsample_bilinear2d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2841679aa6f2baac5cf618cd9cf445c5eeeda961
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor upsample_nearest2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+TORCH_API at::Tensor upsample_nearest2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce9dcc94c9058cf57a308b913862b376611155e5
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/value_selecting_reduction_backward_ops.h>
+
+namespace at {
+
+
+// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
+inline at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
+  }
+}
+
+// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
+inline at::Tensor value_selecting_reduction_backward_symint(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
+  }
+}
+
+}
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/view_copy_native.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/view_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..181610609dd7d983ef29da296033c91cafb02b15
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/view_copy_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & view_copy_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out);
+TORCH_API at::Tensor view_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size);
+TORCH_API at::Tensor & view_copy_dtype_out(const at::Tensor & self, at::ScalarType dtype, at::Tensor & out);
+TORCH_API at::Tensor view_copy_dtype(const at::Tensor & self, at::ScalarType dtype);
+} // namespace native
+} // namespace at
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/view_ops.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/view_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a13519b0c39bf434905b95994d56797103bdc45c
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/view_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API view {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view(Tensor(a) self, SymInt[] size) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size);
+};
+
+struct TORCH_API view_dtype {
+  using schema = at::Tensor (const at::Tensor &, at::ScalarType);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dtype")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, at::ScalarType dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype);
+};
+
+}} // namespace at::_ops
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy_cuda_dispatch.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bd200e821a75af658957bb5614bb442b02302065
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor xlogy(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & xlogy_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & xlogy_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
diff --git a/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/zero.h b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/zero.h
new file mode 100644
index 0000000000000000000000000000000000000000..54938c07317dd1a18690430c6cd529919c03571a
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/torch/include/ATen/ops/zero.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/zero_ops.h>
+
+namespace at {
+
+
+// aten::zero_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & zero_(at::Tensor & self) {
+    return at::_ops::zero_::call(self);
+}
+
+// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & zero_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::zero_out::call(self, out);
+}
+// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & zero_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::zero_out::call(self, out);
+}
+
+// aten::zero(Tensor self) -> Tensor
+inline at::Tensor zero(const at::Tensor & self) {
+    return at::_ops::zero::call(self);
+}
+
+}