diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Byte_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Byte_native.h new file mode 100644 index 0000000000000000000000000000000000000000..387b1266e845d0d55de89caae7c516384b5f379b --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Byte_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _cast_Byte(const at::Tensor & self, bool non_blocking=false); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_compress.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_compress.h new file mode 100644 index 0000000000000000000000000000000000000000..5a4f10ed85baabd4a10da33c7695abdb959a0eb6 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_compress.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_cslt_compress(Tensor input) -> Tensor +inline at::Tensor _cslt_compress(const at::Tensor & input) { + return at::_ops::_cslt_compress::call(input); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..36fa66544c29b73222677592c64281ad51419f58 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _cudnn_ctc_loss { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cudnn_ctc_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity); +}; + +struct TORCH_API _cudnn_ctc_loss_Tensor { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cudnn_ctc_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity); +}; + +struct TORCH_API _cudnn_ctc_loss_out { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cudnn_ctc_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf.h new file mode 100644 index 0000000000000000000000000000000000000000..7a8d8def174aa87365e9a5dfdc70e9e126a2c3c7 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_erf.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_erf(Tensor[] self) -> Tensor[] +inline ::std::vector _foreach_erf(at::TensorList self) { + return at::_ops::_foreach_erf::call(self); +} + +// aten::_foreach_erf_(Tensor(a!)[] self) -> () +inline void _foreach_erf_(at::TensorList self) { + return at::_ops::_foreach_erf_::call(self); +} + +// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_erf_out(at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_erf_out::call(self, out); +} +// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_erf_outf(at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_erf_out::call(self, out); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_assert_scalar.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_assert_scalar.h new file mode 100644 index 0000000000000000000000000000000000000000..f55fc6bd109dd49263b7934ad802301bdb4cbc3c --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_assert_scalar.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_functional_assert_scalar(Scalar self, str assert_msg, Tensor dep_token) -> Tensor +inline at::Tensor _functional_assert_scalar(const at::Scalar & self, c10::string_view assert_msg, const at::Tensor & dep_token) { + return at::_ops::_functional_assert_scalar::call(self, assert_msg, dep_token); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7ab748f7e3828c0398649a363b35c0fe2a2e90ff --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _fw_primal(const at::Tensor & self, int64_t level); +} // 
namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_compositeexplicitautogradnonfunctional_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5ede1e32f911922d9ef88d621e622bbe4a79c043 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple _linalg_svd(const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, c10::optional driver=c10::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cpu_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..190051feb674f344c16bcfd81df596f0bd3946ed --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _segment_reduce_backward(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional & lengths={}, const c10::optional & offsets={}, int64_t axis=0, const c10::optional & initial=c10::nullopt); + +} // namespace cpu +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..67cfde367693393d9872bb36fb3b9a19d2cb135a --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor +inline at::Tensor _sparse_broadcast_to_copy(const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::_sparse_broadcast_to_copy::call(self, size); +} + +// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _sparse_broadcast_to_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::_sparse_broadcast_to_copy_out::call(self, size, out); +} +// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _sparse_broadcast_to_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::_sparse_broadcast_to_copy_out::call(self, size, out); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul.h new file mode 100644 index 0000000000000000000000000000000000000000..9ce383d5c70f0dc6fa728ac7f6f973a986fcb89c --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sparse_matmul.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor +inline at::Tensor _sparse_sparse_matmul(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::_sparse_sparse_matmul::call(self, other); +} + +// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _sparse_sparse_matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::_sparse_sparse_matmul_out::call(self, other, out); +} +// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _sparse_sparse_matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::_sparse_sparse_matmul_out::call(self, other, out); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_gru_cell_backward_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_gru_cell_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..55be257872606a14eb4dfa16639dce066b3e6aac --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_gru_cell_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple _thnn_differentiable_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_to_cpu_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_to_cpu_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c283d3bf38b7e25a0a89b3a445daf77a6179b1d8 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/_to_cpu_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _to_cpu { + using schema = ::std::vector (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_to_cpu") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_to_cpu(Tensor[] tensors) -> Tensor[]") + static ::std::vector call(at::TensorList tensors); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/acos.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/acos.h new file mode 100644 index 0000000000000000000000000000000000000000..395609e4f653a03c099e022e584c0001cfc863d4 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/acos.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::acos(Tensor self) -> Tensor +inline at::Tensor acos(const at::Tensor & self) { + return at::_ops::acos::call(self); +} + +// aten::acos_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & acos_(at::Tensor & self) { + return at::_ops::acos_::call(self); +} + +// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & acos_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::acos_out::call(self, out); +} +// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & acos_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::acos_out::call(self, out); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/align_as_compositeimplicitautograd_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/align_as_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c1b9b1284d4cdb8ed7250b54795c5434db44098d --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/align_as_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor align_as(const at::Tensor & self, const at::Tensor & other); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/all_meta_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/all_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5062377415a33ba2d9a2bccbf10f104662415123 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/all_meta_dispatch.h @@ -0,0 +1,31 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor all(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API at::Tensor & all_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out); +TORCH_API at::Tensor all(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & all_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out); +TORCH_API at::Tensor all(const at::Tensor & self); +TORCH_API at::Tensor & all_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & all_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided.h new file mode 100644 index 0000000000000000000000000000000000000000..9351124297c314a26151eb5b5ddb179b18d07c9f --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/as_strided.h @@ -0,0 +1,69 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +inline at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); +} +namespace symint { + template ::value>> + at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); + } +} + +// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +inline at::Tensor as_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided::call(self, size, stride, storage_offset); +} +namespace symint { + template ::value>> + at::Tensor as_strided(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided::call(self, size, stride, storage_offset); + } +} + +// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) +inline const at::Tensor & as_strided_(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided_::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? 
c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); +} +namespace symint { + template ::value>> + const at::Tensor & as_strided_(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided_::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); + } +} + +// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) +inline const at::Tensor & as_strided__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided_::call(self, size, stride, storage_offset); +} +namespace symint { + template ::value>> + const at::Tensor & as_strided_(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) { + return at::_ops::as_strided_::call(self, size, stride, storage_offset); + } +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..909f8b432177a9b0a6fb36deb34d715e72276701 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/atan2_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API atan2_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API atan2_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan2_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan2_(Tensor(a!) 
self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API atan2 { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan2(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_1d_compositeimplicitautograd_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_1d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ec584ce60f0ff0938de5c63119911c862253ef4d --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/atleast_1d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor atleast_1d(const at::Tensor & self); +TORCH_API ::std::vector atleast_1d(at::TensorList tensors); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_reduce_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_reduce_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f4add9e3a3cd75ce739454371027d9b16894b683 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_reduce_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple batch_norm_backward_reduce_out(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3); +TORCH_API ::std::tuple batch_norm_backward_reduce_cuda(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional & weight, bool input_g, bool weight_g, bool bias_g); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..00e674925702e2dac42ec88ac9a2a73c25d7a0e2 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API bilinear { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bilinear") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? 
bias=None) -> Tensor") + static at::Tensor call(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional & bias); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional & bias); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h new file mode 100644 index 0000000000000000000000000000000000000000..42a10def65600bb8d0e75f715a62b7bb5e1f2a9a --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_right_shift_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_bitwise_right_shift_out : public at::meta::structured_bitwise_right_shift_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor bitwise_right_shift(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & bitwise_right_shift_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & bitwise_right_shift_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor bitwise_right_shift(const at::Scalar & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_right_shift_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max_compositeexplicitautogradnonfunctional_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0fb9e60f21e135559f8dbc984ad9e0d7b1cf791b --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor clamp_max(const at::Tensor & self, const at::Scalar & max); +TORCH_API at::Tensor & clamp_max_(at::Tensor & self, const at::Scalar & max); +TORCH_API at::Tensor clamp_max(const at::Tensor & self, const at::Tensor & max); +TORCH_API at::Tensor & clamp_max_(at::Tensor & self, const at::Tensor & max); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max_cpu_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f4e2e606f28f98b82b96985f1879a663c0a7f208 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_max_cpu_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor clamp_max(const at::Tensor & self, const at::Scalar & max); +TORCH_API at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & max); +TORCH_API at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Scalar & max, at::Tensor & out); +TORCH_API at::Tensor & clamp_max_(at::Tensor & self, const at::Scalar & max); +TORCH_API at::Tensor clamp_max(const at::Tensor & self, const at::Tensor & max); +TORCH_API at::Tensor & clamp_max_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & max); +TORCH_API at::Tensor & clamp_max_outf(const at::Tensor & self, const at::Tensor & max, at::Tensor & out); +TORCH_API at::Tensor & clamp_max_(at::Tensor & self, const at::Tensor & max); + +} // namespace cpu +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/combinations.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/combinations.h new file mode 100644 index 0000000000000000000000000000000000000000..e0b54b1f663d89a1e6aeac801b16a38b763f0d45 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/combinations.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor +inline at::Tensor combinations(const at::Tensor & self, int64_t r=2, bool with_replacement=false) { + return at::_ops::combinations::call(self, r, with_replacement); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/contiguous_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/contiguous_native.h new file mode 100644 index 0000000000000000000000000000000000000000..da924668246a048c6f89366f32f23ec31bf1813a --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/contiguous_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from 
NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor contiguous(const at::Tensor & self, at::MemoryFormat memory_format=MemoryFormat::Contiguous); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_meta.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a30a62c4046ac63568b445eb036e3088cfb1d437 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_copysign_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/dense_dim_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/dense_dim_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bc81da6a8c186ac660705fa6b82366deab22f291 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/dense_dim_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API int64_t dense_dim_strided(const at::Tensor & self); +TORCH_API int64_t dense_dim_sparse(const at::Tensor & self); +TORCH_API int64_t dense_dim_sparse_csr(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/div_cuda_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/div_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4f283c7ac8b7f15010b48ad60a6a0749cfc30baf --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/div_cuda_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor div(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & div_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor div(const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode); +TORCH_API at::Tensor & div_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode); +TORCH_API at::Tensor & div_outf(const at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode, at::Tensor & out); +TORCH_API at::Tensor & div_(at::Tensor & self, const at::Tensor & other, c10::optional rounding_mode); + +} // namespace cuda +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/eye_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/eye_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8a00dc9e7a1c23222702720b7ba9241396b2cd9b --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/eye_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API eye { + using schema = at::Tensor (c10::SymInt, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eye") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(c10::SymInt n, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API eye_m { + using schema = at::Tensor (c10::SymInt, c10::SymInt, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eye") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "m") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor") + static at::Tensor call(c10::SymInt n, c10::SymInt m, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API eye_out { + using schema = at::Tensor & (c10::SymInt, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eye") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(c10::SymInt n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, at::Tensor & out); +}; + +struct TORCH_API eye_m_out { + using schema = at::Tensor & (c10::SymInt, c10::SymInt, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::eye") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "m_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(c10::SymInt n, c10::SymInt m, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymInt n, c10::SymInt m, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn.h new file mode 100644 index 0000000000000000000000000000000000000000..b5638bb13e6e159bab6d630c9f479591dd7abb58 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +inline at::Tensor fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm); + } +} + +// aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor +inline at::Tensor fft_fftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn::call(self, s, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_fftn(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn::call(self, s, dim, norm); + } +} + +// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_fftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_fftn_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_fftn_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fftn_out::call(self, s, dim, norm, out); + } +} + +// aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & fft_fftn_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_fftn_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_fftn_out::call(self, s, dim, norm, out); + } +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/float_power_compositeimplicitautograd_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/float_power_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..637e920813d050358e6ab607ee9c6b0021eb8528 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/float_power_compositeimplicitautograd_dispatch.h @@ -0,0 +1,33 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor float_power(const at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor & float_power_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out); +TORCH_API at::Tensor & float_power_(at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor float_power(const at::Scalar & self, const at::Tensor & exponent); +TORCH_API at::Tensor & float_power_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent); +TORCH_API at::Tensor & float_power_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out); +TORCH_API at::Tensor float_power(const at::Tensor & self, const at::Scalar & exponent); +TORCH_API at::Tensor & float_power_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent); +TORCH_API at::Tensor & float_power_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out); +TORCH_API at::Tensor & float_power_(at::Tensor & self, const at::Scalar & exponent); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/floor_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/floor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e38ab4cb9f55874322e1ec0f5ba937a0f6d9b800 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/floor_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_floor_out : public at::meta::structured_floor { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor floor_sparse(const at::Tensor & self); 
+TORCH_API at::Tensor & floor_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & floor_sparse_(at::Tensor & self); +TORCH_API at::Tensor floor_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & floor_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & floor_sparse_csr_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/full_like_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/full_like_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a37accf4c783dd7314208ee702259bfac0fe16a9 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/full_like_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}, c10::optional memory_format=c10::nullopt); +TORCH_API at::Tensor & full_like_out(const at::Tensor & self, const at::Scalar & fill_value, c10::optional memory_format, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ff4abff062f07b79ffb494a6bbe740126a26e72b --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_gelu_backward_out_cpu : public at::meta::structured_gelu_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, const at::Tensor & grad_input); +}; +struct TORCH_API structured_gelu_backward_out_cuda : public at::meta::structured_gelu_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, const at::Tensor & grad_input); +}; +TORCH_API at::Tensor gelu_backwards_nested(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor mkldnn_gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/gt.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/gt.h new file mode 100644 index 0000000000000000000000000000000000000000..366b9b36a2144697c15631369ff9f918226f6784 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/gt.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::gt_Scalar_out::call(self, other, out); +} +// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::gt_Scalar_out::call(self, other, out); +} + +// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor gt(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::gt_Scalar::call(self, other); +} + +// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt_Tensor_out::call(self, other, out); +} +// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::gt_Tensor_out::call(self, other, out); +} + +// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor gt(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt_Tensor::call(self, other); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..6d009f5fd27a763e9d71ff60753ffad6bc0cef92 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta.h @@ -0,0 +1,32 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_gt_Scalar : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Scalar & other); +}; +struct TORCH_API structured_gt_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..df98f2662d6e64930a00863c8c317c0e9b439846 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_hardsigmoid : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a8e6e18b645ef9781acbf9d05c463c6a431d6c83 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardsigmoid_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardsigmoid") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API hardsigmoid { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardsigmoid") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardsigmoid(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API hardsigmoid_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardsigmoid_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardsigmoid_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ee73a2a725a38f2bfcf4425f06b802bac4dae677 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/heaviside_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_heaviside_out : public at::meta::structured_heaviside { +void impl(const at::Tensor & self, const at::Tensor & values, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/histc_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/histc_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..66b5eed6d99d6fe0b34fc7d1a24ed8d4cb1eb27e --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/histc_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API histc_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::histc") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out); +}; + +struct TORCH_API histc { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::histc") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/i0.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/i0.h new file mode 100644 index 0000000000000000000000000000000000000000..2b64824d76ebf22dc922f7c20809a5d55051edae --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/i0.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::i0(Tensor self) -> Tensor +inline at::Tensor i0(const at::Tensor & self) { + return at::_ops::i0::call(self); +} + +// aten::i0_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & i0_(at::Tensor & self) { + return at::_ops::i0_::call(self); +} + +// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::i0_out::call(self, out); +} +// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::i0_out::call(self, out); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..519b18dc7c7f6d15a70bbc1203402f0481044d18 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/is_inference_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_inference { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_inference") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_inference(Tensor self) -> bool") + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0a88b448c4c8ddbc0676aa839e532d2016f13c1d --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/is_neg_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API bool is_neg(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h new file mode 100644 index 0000000000000000000000000000000000000000..d6e96599a7505352bd331e7bdfc0d4f9d24ca8bb --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h @@ -0,0 +1,79 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options={}) { + return at::_ops::kaiser_window::call(window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::kaiser_window::call(window_length, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options={}) { + return at::_ops::kaiser_window_periodic::call(window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::kaiser_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options={}) { + return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, dtype, layout, device, pin_memory); +} + +// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length) { + return at::_ops::kaiser_window_out::call(window_length, out); +} +// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor & out) { + return at::_ops::kaiser_window_out::call(window_length, out); +} + +// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out); +} +// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out); +} + +// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic, double beta) { + return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out); +} +// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, double beta, at::Tensor & out) { + return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c2929d4f764384fc8408c377404a68ee458b0c8c --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/masked_fill_native.h @@ -0,0 +1,33 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill_Scalar_out(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & masked_fill__cpu(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill__cuda(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor NestedTensor_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill__quantized_cpu(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor & masked_fill__quantized_cuda(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value); +TORCH_API at::Tensor masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill_Tensor_out(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out); +TORCH_API at::Tensor & masked_fill__cpu(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill__cuda(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill__quantized_cpu(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +TORCH_API at::Tensor & masked_fill__quantized_cuda(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..773c588deda076c45abfaefd08924c8fd3525ac7 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple max(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); +TORCH_API at::Tensor max(const at::Tensor & self); +TORCH_API at::Tensor & max_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/minimum.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/minimum.h new file mode 100644 index 0000000000000000000000000000000000000000..438bc1746a1b52d98aa44bfaec8aee16b0e364dc --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/minimum.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::minimum(Tensor self, Tensor other) -> Tensor +inline at::Tensor minimum(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::minimum::call(self, other); +} + +// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & minimum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::minimum_out::call(self, other, out); +} +// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & minimum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::minimum_out::call(self, other, out); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b599115b674acc97572126e754f2cc59dc58d5f6 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_adaptive_avg_pool2d_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor mkldnn_adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API at::Tensor & mkldnn_adaptive_avg_pool2d_out(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..61244fc51a5b06a0f955b01f8da4d29f9d2d0095 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API mkldnn_max_pool3d { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_max_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode); +}; + +struct TORCH_API mkldnn_max_pool3d_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_max_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer.h new file mode 100644 index 0000000000000000000000000000000000000000..9ef7321e75a6fb59ab97378c8217ceec9757d23f --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor) +inline ::std::tuple mkldnn_rnn_layer(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) { + return 
at::_ops::mkldnn_rnn_layer::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train); +} + +// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) +inline ::std::tuple mkldnn_rnn_layer_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) { + return at::_ops::mkldnn_rnn_layer_out::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3); +} +// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) +inline ::std::tuple mkldnn_rnn_layer_outf(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { + return at::_ops::mkldnn_rnn_layer_out::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train, out0, out1, out2, out3); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e6fc13b72e17a3f08c66fe1aa23e79fb8aa63192 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_rnn_layer_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API mkldnn_rnn_layer_backward { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_rnn_layer_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace); +}; + +struct TORCH_API mkldnn_rnn_layer_backward_out { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, bool, int64_t, int64_t, int64_t, bool, bool, bool, at::IntArrayRef, bool, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_rnn_layer_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? 
grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!))") + static ::std::tuple call(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, at::Tensor & out6); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4a031b925989f4c016a870504c5e10e54a4e6ee8 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_mse_loss_out : public at::meta::structured_mse_loss { +void impl(const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/msort_compositeimplicitautograd_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/msort_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e321ab1d6b38c758dee7209182daf5d3b0f3b3b1 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/msort_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor msort(const at::Tensor & self); +TORCH_API at::Tensor & msort_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & msort_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1fcc7fbb9cab145dc8c53fc1d325bcd1b7aa6bce --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor nll_loss2d_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100); +TORCH_API at::Tensor & nll_loss2d_out(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/not_equal_compositeimplicitautograd_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/not_equal_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6b9a6f8e976d4659ebaa0ba68c74543cd0306cf1 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/not_equal_compositeimplicitautograd_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor not_equal(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & not_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & not_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & not_equal_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor not_equal(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & not_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & not_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & not_equal_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/pad_sequence_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/pad_sequence_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d5e328923eaaea776dec918a69af7bdff91bc850 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/pad_sequence_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API pad_sequence { + using schema = at::Tensor (at::TensorList, bool, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pad_sequence") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor") + static at::Tensor call(at::TensorList sequences, bool batch_first, double padding_value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList sequences, bool batch_first, double padding_value); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_unshuffle_compositeexplicitautograd_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_unshuffle_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8c20bedcd8da991d37d60117cbbaae8d429c49e4 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_unshuffle_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & pixel_unshuffle_out(at::Tensor & out, const at::Tensor & self, int64_t downscale_factor); +TORCH_API at::Tensor & pixel_unshuffle_outf(const at::Tensor & self, int64_t downscale_factor, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_nll_loss_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_nll_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..66d1c4435b70f6ba72c51156c8a18da381f78035 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_nll_loss_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor poisson_nll_loss(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_cuda_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3e94b3734f279106580d9446f559ca3b441b7e11 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor reflection_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & reflection_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input); +TORCH_API at::Tensor & reflection_pad3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad3d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_meta_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bdae9acbbb7eee9311ce50b1432be188288d52d4 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & rrelu_with_noise_(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional generator=c10::nullopt); + +} // namespace meta +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/softmax_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/softmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f43baad39cc2ab4b87c0299237a31c0938e2e3f6 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/softmax_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API softmax_int { + using schema = at::Tensor (const at::Tensor &, int64_t, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softmax.int(Tensor self, int dim, ScalarType? 
dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, c10::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype); +}; + +struct TORCH_API softmax_int_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, c10::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype, at::Tensor & out); +}; + +struct TORCH_API softmax_Dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, c10::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional dtype); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_dim_cpu_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_dim_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cd9e2784c94a6ccfb097a3be2910a0a0f3bdbaae --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_dim_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API int64_t sparse_dim(const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_digamma_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_digamma_native.h new file mode 100644 index 0000000000000000000000000000000000000000..dc90fbdefc4a1477bed4f34f7e2fec3929f5aea6 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_digamma_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor special_digamma(const at::Tensor & self); +TORCH_API at::Tensor & special_digamma_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_gammaincc.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_gammaincc.h new file mode 100644 index 0000000000000000000000000000000000000000..9f971732ea56568571623f878abb06979950c26a --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_gammaincc.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_gammaincc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_gammaincc_out::call(self, other, out); +} +// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_gammaincc_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_gammaincc_out::call(self, other, out); +} + +// aten::special_gammaincc(Tensor self, Tensor other) -> Tensor +inline at::Tensor special_gammaincc(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_gammaincc::call(self, other); +} + +} diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7e51c50a1a056fb7b8602952fd7720b11f5feced --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_log_ndtr(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..5c20a02a5a3d94700bdfc3ca635e854bd44f83d0 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_xlog1py : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/sqrt_native.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/sqrt_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a86b3bf6e294f59e6ca93eeb81d0490442dfac17 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/sqrt_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_sqrt_out : public at::meta::structured_sqrt { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor sqrt_sparse(const at::Tensor & self); +TORCH_API at::Tensor & sqrt_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sqrt_sparse_(at::Tensor & self); +TORCH_API at::Tensor sqrt_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & sqrt_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sqrt_sparse_csr_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c02d3f30732d2397e2fb246e07e3dc0ce0f2ddda --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API to_sparse_csc { + using schema = at::Tensor (const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::to_sparse_csc") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "to_sparse_csc(Tensor self, int? 
dense_dim=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::optional dense_dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dense_dim); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_cuda_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..83899990b92e9a8e14afc117c27961e0beae7a09 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong); +TORCH_API at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + +} // namespace cuda +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/triu_meta_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/triu_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..287d5d336a5a40a60bb1cf54a0174cd2c3296b39 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/triu_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor triu(const at::Tensor & self, int64_t diagonal=0); +TORCH_API at::Tensor & triu_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0); +TORCH_API at::Tensor & triu_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out); +TORCH_API at::Tensor & triu_(at::Tensor & self, int64_t diagonal=0); + +} // namespace meta +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ee6c0d108ea19bee8b688d0180d25f3f117e233e --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API upsample_bilinear2d_vec { + using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, bool, c10::optional>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_bilinear2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor") + static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); +}; + +struct TORCH_API upsample_bilinear2d_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, bool, c10::optional, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_bilinear2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out); +}; + +struct TORCH_API upsample_bilinear2d { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_bilinear2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..616a4d16c5c105c73a9342d049c768e27a549f54 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API view_as_real_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_as_real_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_as_real_copy(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API view_as_real_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_as_real_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_cpu_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d830e8bb2aa6e3b6f518e3168899d50c72a416c3 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor view_as_real(const at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_ops.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..874e79c85c675eb3cb79f2259e11bfb2c983f7b0 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API vstack { + using schema = at::Tensor (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::vstack") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "vstack(Tensor[] tensors) -> Tensor") + static at::Tensor call(at::TensorList tensors); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors); +}; + +struct TORCH_API vstack_out { + using schema = at::Tensor & (at::TensorList, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::vstack") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(at::TensorList tensors, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/vila/lib/python3.10/site-packages/torch/include/ATen/ops/where_cpu_dispatch.h b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/where_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f47c36a91f2d70d6fc347e2b9940a4ab754202a7 --- /dev/null +++ b/vila/lib/python3.10/site-packages/torch/include/ATen/ops/where_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor where(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & where_out(at::Tensor & out, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & where_outf(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cpu +} // namespace at
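The headers above all follow the torchgen Operator.h pattern: each ATen operator gets a struct in at::_ops that exposes a static call(...) which goes through the dispatcher and a static redispatch(...) which takes an explicit c10::DispatchKeySet. The sketch below is not part of the diff; it only illustrates, assuming a normal libtorch build, how two of the operators declared in this change (vstack and view_as_real_copy) can be reached through those generated entry points. The file name and the example tensors are invented for illustration.

// example_ops_usage.cpp -- illustrative only, not part of the change above.
#include <ATen/ATen.h>                       // public ATen API (pulls in the generated headers)
#include <ATen/ops/vstack_ops.h>             // at::_ops::vstack / at::_ops::vstack_out (added in this diff)
#include <ATen/ops/view_as_real_copy_ops.h>  // at::_ops::view_as_real_copy (added in this diff)

int main() {
  at::Tensor a = at::ones({2, 3});
  at::Tensor b = at::zeros({2, 3});

  // Functional overload: schema "vstack(Tensor[] tensors) -> Tensor".
  at::Tensor stacked = at::_ops::vstack::call({a, b});          // shape {4, 3}

  // Out overload: schema "vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)".
  at::Tensor out = at::empty({4, 3});
  at::_ops::vstack_out::call({a, b}, out);

  // "view_as_real_copy(Tensor self) -> Tensor" on a complex input appends a
  // trailing dimension of size 2 holding the (real, imag) components.
  at::Tensor c = at::ones({2}, at::kComplexFloat);
  at::Tensor as_real = at::_ops::view_as_real_copy::call(c);    // shape {2, 2}

  // redispatch(...) has the same argument list prefixed by a c10::DispatchKeySet;
  // it is intended for dispatcher-internal re-entry rather than end-user code.
  return 0;
}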
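Similarly, the *_cpu_dispatch.h, *_cuda_dispatch.h, and *_meta_dispatch.h headers in this diff declare backend-specific entry points in at::cpu, at::cuda, and at::meta that call a single kernel registration directly, without going through the dispatcher. A minimal sketch of the at::cpu::where declaration added above, again assuming a standard libtorch build and using invented example values:

// example_cpu_dispatch.cpp -- illustrative only; bypassing the dispatcher is
// only valid when every input already lives on the backend being called into.
#include <ATen/ATen.h>
#include <ATen/ops/where_cpu_dispatch.h>   // at::cpu::where (added in this diff)

int main() {
  at::Tensor cond = at::rand({3}) > 0.5;   // bool tensor on CPU
  at::Tensor x = at::ones({3});
  at::Tensor y = at::zeros({3});

  // Direct call into the CPU kernel registration: no device or autograd
  // dispatch happens, so autograd will not record this op and non-CPU
  // inputs would be invalid here.
  at::Tensor z = at::cpu::where(cond, x, y);

  // CUDA counterparts (e.g. at::cuda::tril_indices above) follow the same
  // pattern but additionally require a CUDA build and CUDA tensors.
  return 0;
}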