diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_addmm_activation_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_addmm_activation_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d211759a06dfba8323dcbc4ae9261f0096ff8ba
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_addmm_activation_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false);
+TORCH_API at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false);
+TORCH_API at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9493e246b43e591bcb67d6e08f70f8b65c3b1d4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _conj_physical(const at::Tensor & self);
+TORCH_API at::Tensor & _conj_physical_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & _conj_physical_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb55728db8ab1c7ed40caba72e69bb2593067950
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_ctc_loss_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_tensor(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_rnn_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_rnn_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..47fe97b0fe6635e2046160d6e4413fbd12e9b29f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cudnn_rnn_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out_symint(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const ::std::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const ::std::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const ::std::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional<at::Tensor> & dropout_state);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..becb3e4bd64d377ce246bd2a41a1e822c3e30656
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _embedding_bag_per_sample_weights_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag_per_sample_weights_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx);
+};
+
+struct TORCH_API _embedding_bag_per_sample_weights_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag_per_sample_weights_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_empty_affine_quantized_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_empty_affine_quantized_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..961ab9388ea1ef56fae7bad635c6b45caaa1c5d6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_empty_affine_quantized_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, ::std::optional<at::MemoryFormat> memory_format=c10::MemoryFormat::Contiguous);
+TORCH_API at::Tensor _empty_affine_quantized(at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format);
+TORCH_API at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, ::std::optional<at::MemoryFormat> memory_format=c10::MemoryFormat::Contiguous);
+TORCH_API at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_sdp_choice.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_sdp_choice.h
new file mode 100644
index 0000000000000000000000000000000000000000..2efb01389ed19c01d9ab18e7326d80c684ce18f7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fused_sdp_choice.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/_fused_sdp_choice_ops.h>
+
+namespace at {
+
+
+// aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> int
+inline int64_t _fused_sdp_choice(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, ::std::optional<double> scale=::std::nullopt, bool enable_gqa=false) {
+    return at::_ops::_fused_sdp_choice::call(query, key, value, attn_mask, dropout_p, is_causal, scale, enable_gqa);
+}
+
+}
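The _fused_sdp_choice header above backs scaled-dot-product-attention backend selection: the returned int64_t identifies which fused SDPA backend the dispatcher would pick for the given inputs. A minimal sketch of calling it from C++ (the shapes are illustrative assumptions, not taken from this diff):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      // Illustrative [batch, heads, seq_len, head_dim] inputs.
      at::Tensor q = at::randn({2, 8, 128, 64});
      at::Tensor k = at::randn({2, 8, 128, 64});
      at::Tensor v = at::randn({2, 8, 128, 64});
      // attn_mask, dropout_p, is_causal, scale, and enable_gqa all fall
      // back to the defaults declared in the generated header.
      int64_t backend = at::_fused_sdp_choice(q, k, v);
      std::cout << "chosen SDPA backend: " << backend << "\n";
    }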
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fw_primal_copy_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fw_primal_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..deac70c306c727e074b5f636c68f23a995be3687
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_fw_primal_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _fw_primal_copy_out(at::Tensor & out, const at::Tensor & self, int64_t level);
+TORCH_API at::Tensor & _fw_primal_copy_outf(const at::Tensor & self, int64_t level, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_local_scalar_dense_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_local_scalar_dense_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..42ad8d517f041f609a5ff671b69024e8cd29dc94
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_local_scalar_dense_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Scalar _local_scalar_dense(const at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
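_local_scalar_dense is the op that Tensor::item() bottoms out in for dense tensors; the header above exposes its CPU kernel. A small sketch, assuming a 0-dim tensor:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor t = at::scalar_tensor(3.5, at::kDouble); // 0-dim tensor
      double via_item = t.item<double>();        // item() dispatches here
      at::Scalar s = at::_local_scalar_dense(t); // the op declared above
      (void)via_item; (void)s;
    }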
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_nested_get_lengths_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_nested_get_lengths_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b9d2e5fd5be542077158871f57ae9c937e4f7895
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_nested_get_lengths_native.h
@@ -0,0 +1,20 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_pack_padded_sequence_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_pack_padded_sequence_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b75f2e9d15c1d4fa5424170f9fb7a4105e032e9e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_pack_padded_sequence_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _pack_padded_sequence {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pack_padded_sequence")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, const at::Tensor & lengths, bool batch_first);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first);
+};
+
+struct TORCH_API _pack_padded_sequence_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pack_padded_sequence")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_shape_as_tensor_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_shape_as_tensor_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..076f0fc1a63d87c504205f467a02a94c684b458e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_shape_as_tensor_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _shape_as_tensor(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1fac70540751b2009eda06def54937bd19641781
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _sparse_coo_tensor_with_dims_out(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out);
+TORCH_API at::Tensor new_with_dims_sparse(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_weight_int4pack_mm.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_weight_int4pack_mm.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d937b44af7e7dd59b0b25a38a8986926f83b657
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_weight_int4pack_mm.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/_weight_int4pack_mm_ops.h>
+
+namespace at {
+
+
+// aten::_weight_int4pack_mm(Tensor self, Tensor mat2, int qGroupSize, Tensor qScaleAndZeros) -> Tensor
+inline at::Tensor _weight_int4pack_mm(const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros) {
+    return at::_ops::_weight_int4pack_mm::call(self, mat2, qGroupSize, qScaleAndZeros);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c9a4fe2b177167fc3517555c5993275e4b4d6bb6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor & adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size);
+TORCH_API at::Tensor & adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..834df66dab5af9edc168aac4e6315f1f75ef0336
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor & adaptive_avg_pool3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size);
+TORCH_API at::Tensor & adaptive_avg_pool3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/adaptive_max_pool1d_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/adaptive_max_pool1d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..007d4c1b90af81317186dd13bedcc413d6367c69
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/adaptive_max_pool1d_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API adaptive_max_pool1d {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef output_size);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/add_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/add_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce96b40c3dc3ece19c59c1be2ea5ebaed063678e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/add_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+
+} // namespace cuda
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/add_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/add_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..238f1c534dd901985853a87bedfcded3afdc19a8
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/add_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+
+} // namespace meta
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/arctan2_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/arctan2_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b10f365d03b774d0904a0defab63a07f693f1090
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/arctan2_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor arctan2(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & arctan2_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & arctan2_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & arctan2_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeimplicitautograd
+} // namespace at
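arctan2 is the NumPy-spelling alias of atan2, registered as a composite-implicit op, so it lowers to the same kernel. A quick sketch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor y = at::randn({4});
      at::Tensor x = at::randn({4});
      at::Tensor a = at::arctan2(y, x); // alias declared above
      at::Tensor b = at::atan2(y, x);   // canonical op
      TORCH_CHECK(at::allclose(a, b));  // elementwise identical
    }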
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/argmax_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/argmax_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..11e0d6759b614ee468f2d98d5c64d7269de54d2d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/argmax_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API argmax {
+  using schema = at::Tensor (const at::Tensor &, ::std::optional<int64_t>, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::argmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim);
+};
+
+struct TORCH_API argmax_out {
+  using schema = at::Tensor & (const at::Tensor &, ::std::optional<int64_t>, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::argmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asin.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asin.h
new file mode 100644
index 0000000000000000000000000000000000000000..f04c5011a306a552960e0da5f526a4df002632a7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/asin.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/asin_ops.h>
+
+namespace at {
+
+
+// aten::asin(Tensor self) -> Tensor
+inline at::Tensor asin(const at::Tensor & self) {
+    return at::_ops::asin::call(self);
+}
+
+// aten::asin_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & asin_(at::Tensor & self) {
+    return at::_ops::asin_::call(self);
+}
+
+// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::asin_out::call(self, out);
+}
+// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::asin_out::call(self, out);
+}
+
+}
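The four asin entry points above follow torchgen's standard pattern (functional, in-place, out, outf). A usage sketch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor t = at::linspace(-1.0, 1.0, 5);
      at::Tensor a = at::asin(t);          // functional
      at::Tensor out = at::empty_like(t);
      at::asin_out(out, t);                // out= variant (out first)
      at::asin_outf(t, out);               // outf variant (out last)
      t.asin_();                           // in-place, mirrors at::asin_
    }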
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/batch_norm_stats.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/batch_norm_stats.h
new file mode 100644
index 0000000000000000000000000000000000000000..889f4d09aa15fb9e61afcce9a26a3758b25583c6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/batch_norm_stats.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/batch_norm_stats_ops.h>
+
+namespace at {
+
+
+// aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_stats(const at::Tensor & input, double eps) {
+    return at::_ops::batch_norm_stats::call(input, eps);
+}
+
+// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double eps) {
+    return at::_ops::batch_norm_stats_out::call(input, eps, out0, out1);
+}
+// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_outf(const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::batch_norm_stats_out::call(input, eps, out0, out1);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bmm.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bmm.h
new file mode 100644
index 0000000000000000000000000000000000000000..6dfc9612702cacdfef410cf0aedc90809051f482
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bmm.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/bmm_ops.h>
+
+namespace at {
+
+
+// aten::bmm(Tensor self, Tensor mat2) -> Tensor
+inline at::Tensor bmm(const at::Tensor & self, const at::Tensor & mat2) {
+    return at::_ops::bmm::call(self, mat2);
+}
+
+// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
+    return at::_ops::bmm_out::call(self, mat2, out);
+}
+// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & bmm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
+    return at::_ops::bmm_out::call(self, mat2, out);
+}
+
+}
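A sketch of the bmm entry points above; bmm multiplies two batches of matrices, [b, n, m] x [b, m, p] -> [b, n, p] (shapes here are illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor a = at::randn({10, 3, 4});
      at::Tensor b = at::randn({10, 4, 5});
      at::Tensor c = at::bmm(a, b);       // shape [10, 3, 5]
      at::Tensor out = at::empty({10, 3, 5});
      at::bmm_out(out, a, b);             // out= variant declared above
    }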
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/col_indices_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/col_indices_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7786bd61894dd75d15b6f215bc8bcc463fd4514c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/col_indices_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor col_indices(const at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/column_stack_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/column_stack_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ec62b2605fd86e6ce7080d14a1a37545080708d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/column_stack_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API column_stack {
+  using schema = at::Tensor (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::column_stack")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "column_stack(Tensor[] tensors) -> Tensor")
+  static at::Tensor call(at::TensorList tensors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+struct TORCH_API column_stack_out {
+  using schema = at::Tensor & (at::TensorList, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::column_stack")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(at::TensorList tensors, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/crow_indices_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/crow_indices_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ecbd914196a29abe7a4df5c30411557f165ce90f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/crow_indices_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor crow_indices(const at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumsum_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumsum_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fdd4715db985f64e13d5f6cd0d6feb7377f48ba3
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumsum_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor cumsum(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+TORCH_API at::Tensor & cumsum_outf(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor & cumsum_(at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dequantize.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dequantize.h
new file mode 100644
index 0000000000000000000000000000000000000000..46658a1e8c368b4556ad62ed86bc9ea164e2c056
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dequantize.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/dequantize_ops.h>
+
+namespace at {
+
+
+// aten::dequantize.self(Tensor self) -> Tensor
+inline at::Tensor dequantize(const at::Tensor & self) {
+    return at::_ops::dequantize_self::call(self);
+}
+
+// aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]
+inline ::std::vector<at::Tensor> dequantize(at::TensorList tensors) {
+    return at::_ops::dequantize_tensors::call(tensors);
+}
+
+// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & dequantize_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::dequantize_self_out::call(self, out);
+}
+// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & dequantize_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::dequantize_self_out::call(self, out);
+}
+
+// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()
+inline void dequantize_out(at::TensorList out, at::TensorList tensors) {
+    return at::_ops::dequantize_tensors_out::call(tensors, out);
+}
+// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()
+inline void dequantize_outf(at::TensorList tensors, at::TensorList out) {
+    return at::_ops::dequantize_tensors_out::call(tensors, out);
+}
+
+}
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dequantize_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dequantize_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b81ad3fd3b89f9c5dff5b427637d34c36cc0ac16
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/dequantize_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API dequantize_self {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dequantize")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "self")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dequantize.self(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API dequantize_tensors {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dequantize")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensors")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dequantize.tensors(Tensor[] tensors) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList tensors);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+struct TORCH_API dequantize_self_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dequantize")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "self_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+struct TORCH_API dequantize_tensors_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dequantize")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "tensors_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList tensors, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::TensorList out);
+};
+
+}} // namespace at::_ops
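dequantize has two overloads, .self (one tensor) and .tensors (a list), which is why the _ops header above declares four symbols. A sketch, assuming per-tensor affine quantization:

    #include <ATen/ATen.h>
    #include <vector>

    int main() {
      at::Tensor x = at::randn({4, 4});
      at::Tensor q = at::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/0, at::kQInt8);
      at::Tensor back = at::dequantize(q);   // dequantize.self -> Tensor
      std::vector<at::Tensor> qs = {q};
      std::vector<at::Tensor> many = at::dequantize(qs); // dequantize.tensors -> Tensor[]
    }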
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/embedding_sparse_backward_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/embedding_sparse_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce2ea61127e9199526ff5c41eb2a8a81e3b3a743
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/embedding_sparse_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor embedding_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/float_power_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/float_power_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b997bc3616c879b817d343ddeffe6290b92799de
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/float_power_ops.h
@@ -0,0 +1,105 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API float_power_Tensor_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out);
+};
+
+struct TORCH_API float_power_Tensor_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & exponent);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent);
+};
+
+struct TORCH_API float_power_Scalar_out {
+  using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out);
+};
+
+struct TORCH_API float_power_Scalar {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Scalar(Scalar self, Tensor exponent) -> Tensor")
+  static at::Tensor call(const at::Scalar & self, const at::Tensor & exponent);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent);
+};
+
+struct TORCH_API float_power_Tensor_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out);
+};
+
+struct TORCH_API float_power_Tensor_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & exponent);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent);
+};
+
+struct TORCH_API float_power__Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & exponent);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent);
+};
+
+struct TORCH_API float_power__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::float_power_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & exponent);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent);
+};
+
+}} // namespace at::_ops
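float_power differs from pow in that it always computes in double (or complex128) regardless of input dtype, which the eight overloads above spell out for each Tensor/Scalar combination. A sketch:

    #include <ATen/ATen.h>

    int main() {
      at::Tensor base = at::arange(1, 5);          // int64 input
      at::Tensor r = at::float_power(base, 2);     // Tensor_Scalar overload
      TORCH_CHECK(r.scalar_type() == at::kDouble); // promoted to double
    }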
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmax_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmax_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..772b90b546a0e86e240c393b70e490b11e4a6598
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fmax_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/fmax_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_fmax_out : public at::meta::structured_fmax {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/huber_loss_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/huber_loss_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..320b0b3887a4698ce6e5eb634f4f5a0cc9c78130
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/huber_loss_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API huber_loss_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, int64_t, double, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::huber_loss")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)")
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); +}; + +struct TORCH_API huber_loss { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::huber_loss") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_copy_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_copy_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6092003dd6d727ee6338ccd7f02646d9316a10a5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_copy_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out); +TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_reduce_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_reduce_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..684c0106914c76f79cedb852216e1121ff2b9d3e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/index_reduce_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API index_reduce_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_reduce") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); +}; + +struct TORCH_API index_reduce_ { + using schema = at::Tensor & (at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_reduce_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); +}; + +struct TORCH_API index_reduce { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &, const at::Tensor &, c10::string_view, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_reduce") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/le_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/le_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6aeb8347ceafd4803fa3490bc32f86ad48c50b14 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/le_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/le_meta.h> + +namespace at { +namespace native { +struct TORCH_API
structured_le_Scalar_out : public at::meta::structured_le_Scalar { +void impl(const at::Tensor & self, const at::Scalar & other, const at::Tensor & out); +}; +TORCH_API at::Tensor le_quantized_cpu(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & le_out_quantized_cpu(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +struct TORCH_API structured_le_Tensor_out : public at::meta::structured_le_Tensor { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor le_quantized_cpu(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & le_out_quantized_cpu(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a9d4c00ddfc2bab89c3bd0caf0b727f9857034e7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor(const at::Tensor & self, bool hermitian=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bae9edb4f3461d3c6f7763fd02c3dd957d7de719 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor linalg_vector_norm(const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & linalg_vector_norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & linalg_vector_norm_outf(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6fa322d4ae2d545d7ee7e8c3446476d5e1275197 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/linalg_vector_norm_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_linalg_vector_norm_out : public at::meta::structured_linalg_vector_norm { +void impl(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linspace.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linspace.h new file mode 100644 index 0000000000000000000000000000000000000000..833acdcb7b7c2fd8dfbdea2bb910ea137e47efa5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linspace.h @@ -0,0 +1,97 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/linspace_ops.h> + +namespace at { + + +// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::linspace::call(start, end, steps, dtype, layout, device, pin_memory); +} + +// aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace_Tensor_Tensor::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::linspace_Tensor_Tensor::call(start, end, steps, dtype, layout, device, pin_memory); +} + +// aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace_Tensor_Scalar::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::linspace_Tensor_Scalar::call(start, end, steps, dtype, layout, device, pin_memory); +} + +// aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}) { + return at::_ops::linspace_Scalar_Tensor::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) { + return at::_ops::linspace_Scalar_Tensor::call(start, end, steps, dtype, layout, device, pin_memory); +} + +// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps) { + return at::_ops::linspace_out::call(start, end, steps, out); +} +// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_out::call(start, end, steps, out); +} +
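// A brief usage sketch for the wrappers above (an illustrative assumption, not part of the
// generated header; assumes a standard libtorch build):
//
//   at::Tensor t = at::linspace(0.0, 1.0, /*steps=*/5);   // functional form, allocates the result
//   at::Tensor out = at::empty({5}, at::kFloat);
//   at::linspace_out(out, 0.0, 1.0, /*steps=*/5);         // out= form writes into an existing tensor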
+// aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_out(at::Tensor & out, const at::Tensor & start, const at::Tensor & end, int64_t steps) { + return at::_ops::linspace_Tensor_Tensor_out::call(start, end, steps, out); +} +// aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_outf(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_Tensor_Tensor_out::call(start, end, steps, out); +} + +// aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_out(at::Tensor & out, const at::Tensor & start, const at::Scalar & end, int64_t steps) { + return at::_ops::linspace_Tensor_Scalar_out::call(start, end, steps, out); +} +// aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_outf(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_Tensor_Scalar_out::call(start, end, steps, out); +} + +// aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Tensor & end, int64_t steps) { + return at::_ops::linspace_Scalar_Tensor_out::call(start, end, steps, out); +} +// aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linspace_outf(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out) { + return at::_ops::linspace_Scalar_Tensor_out::call(start, end, steps, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log10_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log10_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1731b779f5d1c4f550daefc6c9ca4c504ea4b408 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log10_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor log10(const at::Tensor & self); +TORCH_API at::Tensor & log10_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a10faffd3437dcb3625c07aec7289e30fdd34a06 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor log(const at::Tensor & self); +TORCH_API at::Tensor & log_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & log_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & log_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ddbba218b0c8d5e636c57fed91cc53c1a8f183a5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_sigmoid_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API log_sigmoid_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!)
grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input); +}; + +struct TORCH_API log_sigmoid_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6f8afefb74248ef0566a3066330ad5393c6f9efc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log_sigmoid_forward_output { + using schema = ::std::tuple (const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_forward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "output") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) 
buffer) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +}; + +struct TORCH_API log_sigmoid_forward { + using schema = ::std::tuple (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_forward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)") + static ::std::tuple call(const at::Tensor & self); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1f75cb31fecaa314e08a514e3f1bbc1ee258c761 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logcumsumexp_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor logcumsumexp(const at::Tensor & self, at::Dimname dim); +TORCH_API at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim); +TORCH_API at::Tensor & logcumsumexp_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logical_xor_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logical_xor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..784c8a544d31b65c2c38c2f9c2a1565536416688 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logical_xor_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor logical_xor(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_xor_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_xor_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logit_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logit_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b50dd218f4db1041b7b54a59d9d66da6f9e6bf10 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logit_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & logit_(at::Tensor & self, ::std::optional<double> eps=::std::nullopt); + +} // namespace meta
} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lt_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lt_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2f39c1e160f1aab8eed435fc84ca45c2fb6878bf --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lt_cpu_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor lt(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & lt_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/matrix_H_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/matrix_H_native.h new file mode 100644 index 0000000000000000000000000000000000000000..65c4a66dbddef66fd784446b41beea93cc7c5140 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/matrix_H_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor matrix_H(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..773c588deda076c45abfaefd08924c8fd3525ac7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_out(at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); +TORCH_API at::Tensor max(const at::Tensor & self); +TORCH_API at::Tensor & max_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & max_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8fac3223e3eac29b0a94175a695094f16544cb7b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/max_native.h @@ -0,0 +1,32 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/max_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_max_out : public at::meta::structured_max_dim { +void impl(const at::Tensor & self, int64_t dim, bool keepdim, const at::Tensor & max, const at::Tensor & max_values); +}; +TORCH_API ::std::tuple<at::Tensor,at::Tensor> qmax(const at::Tensor & self, int64_t dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values); +TORCH_API at::Tensor max(const at::Tensor & self); +TORCH_API at::Tensor & max_unary_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor max_quantized_cpu(const at::Tensor & self); +TORCH_API at::Tensor & max_quantized_unary_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor max(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & max_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_convolution_relu_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_convolution_relu_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d845b5239444c4f7391b55af45d7fa1849737992 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_convolution_relu_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor miopen_convolution_relu(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a6bddc66913221bcc74702e16c7d48e4d6febc52 --- /dev/null +++
b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nll_loss_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API nll_loss_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, int64_t, c10::SymInt, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); +}; + +struct TORCH_API nll_loss_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, int64_t, c10::SymInt, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nll_loss_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor?
weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/or.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/or.h new file mode 100644 index 0000000000000000000000000000000000000000..877ce691462a165000ebc9732a237bc35379f8db --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/or.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/or_ops.h> + +namespace at { + + +// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor __or__(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__or___Scalar::call(self, other); +} + +// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor __or__(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__or___Tensor::call(self, other); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..501532a20bc60c1705e345fea48c04adc6236a97 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & quantize_per_tensor_dynamic_out(at::Tensor & out, const at::Tensor & self, at::ScalarType dtype, bool reduce_range); +TORCH_API at::Tensor & quantize_per_tensor_dynamic_outf(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c0b2e1a0c1e471b062623f236811c9caa9fbbc8d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API quantize_per_tensor_dynamic { + using schema = at::Tensor (const at::Tensor &, at::ScalarType, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantize_per_tensor_dynamic") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::ScalarType dtype, bool reduce_range); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range); +}; + +struct TORCH_API quantize_per_tensor_dynamic_out { + using schema = at::Tensor & (const at::Tensor &, at::ScalarType, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantize_per_tensor_dynamic") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!)
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantized_max_pool3d_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantized_max_pool3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a185218c709033c56fa24b9bba7e939e7e1a6a89 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantized_max_pool3d_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & quantized_max_pool3d_out(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); +TORCH_API at::Tensor quantized_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/randn_like_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/randn_like_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7f8c31c8bfb8a57ed15bc5aff2127bc4472b3879 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/randn_like_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options={}, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt); +TORCH_API at::Tensor randn_like(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, ::std::optional<at::MemoryFormat> memory_format); +TORCH_API at::Tensor & randn_like_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt); +TORCH_API at::Tensor & randn_like_outf(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/random.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/random.h new file mode 100644 index 0000000000000000000000000000000000000000..3f5cdf9fb61c8a2bffa78a427a78cd3729e54486 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/random.h @@ -0,0 +1,67 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/random_ops.h> + +namespace at { + + +// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::random_from_out::call(self, from, to, generator, out); +} +// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & random_outf(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::random_from_out::call(self, from, to, generator, out); +} + +// aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor +inline at::Tensor random(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::random_from::call(self, from, to, generator); +} + +// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::random_to_out::call(self, to, generator, out); +} +// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & random_outf(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::random_to_out::call(self, to, generator, out); +} + +// aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor +inline at::Tensor random(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::random_to::call(self, to, generator); +} + +// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & random_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::random_out::call(self, generator, out); +}
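// A short usage sketch for the wrappers above (illustrative only, assuming a standard
// libtorch build; `x` and `r` are hypothetical names):
//
//   at::Tensor x = at::empty({4}, at::kLong);
//   at::Tensor r = at::random(x, /*from=*/0, /*to=*/10);   // out-of-place: returns a filled copy, x is untouched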
+// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & random_outf(const at::Tensor & self, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::random_out::call(self, generator, out); +} + +// aten::random(Tensor self, *, Generator? generator=None) -> Tensor +inline at::Tensor random(const at::Tensor & self, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::random::call(self, generator); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rename_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rename_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..993736cb929d752f21bb6cb1b14301cfa54b4220 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rename_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API rename_ { + using schema = at::Tensor & (at::Tensor &, ::std::optional<at::DimnameList>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rename_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, ::std::optional<at::DimnameList> names); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, ::std::optional<at::DimnameList> names); +}; + +struct TORCH_API rename { + using schema = at::Tensor (const at::Tensor &, ::std::optional<at::DimnameList>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::rename") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, ::std::optional<at::DimnameList> names); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<at::DimnameList> names); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..33cd084cb7825e4b235642b2e7aa59b1d6549d80 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sigmoid_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/sigmoid_backward_ops.h> + +namespace at { + + +// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) { + return at::_ops::sigmoid_backward_grad_input::call(grad_output, output, grad_input); +}
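// Since d(sigmoid)/dx = y * (1 - y) for y = sigmoid(x), the wrappers above compute
// grad_output * output * (1 - output). A minimal sketch, assuming a standard libtorch build:
//
//   at::Tensor x = at::randn({3});
//   at::Tensor y = at::sigmoid(x);
//   at::Tensor gx = at::sigmoid_backward(/*grad_output=*/at::ones_like(y), /*output=*/y);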
+// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) { + return at::_ops::sigmoid_backward_grad_input::call(grad_output, output, grad_input); +} + +// aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor +inline at::Tensor sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & output) { + return at::_ops::sigmoid_backward::call(grad_output, output); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/signbit_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/signbit_native.h new file mode 100644 index 0000000000000000000000000000000000000000..305159867f9c46ee2ac0dddba8ddc203794102c1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/signbit_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/signbit_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_signbit_out : public at::meta::structured_signbit { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor signbit_sparse(const at::Tensor & self); +TORCH_API at::Tensor & signbit_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor signbit_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & signbit_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..399d1e36bafdbdfbc0f79d5f4efb273e4a7c298d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/slow_conv_dilated2d_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor slow_conv_dilated2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor slow_conv_dilated2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..72baaa7c7470f189b3dbf0fc731771d31991da1e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softshrink_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API softshrink_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softshrink_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!)
grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input); +}; + +struct TORCH_API softshrink_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softshrink_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sparse_bsr_tensor.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sparse_bsr_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..246e303d33f07741efdde2f8aa04bc9b4ca04fb4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sparse_bsr_tensor.h @@ -0,0 +1,43 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); +} + +// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
+inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
+    return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
+}
+
+}
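A usage sketch for the two overload families above: the TensorOptions spelling versus the unpacked dtype/layout/device/pin_memory one. The 4x4 matrix with 2x2 blocks is illustrative; BSR expects crow_indices compressing block rows and values holding one 2-D block per nonzero:

    #include <ATen/ATen.h>

    int main() {
      int64_t crow_data[] = {0, 1, 2};   // block-row pointers (illustrative)
      int64_t col_data[]  = {0, 1};      // block-column indices
      at::Tensor crow = at::from_blob(crow_data, {3}, at::kLong).clone();
      at::Tensor col  = at::from_blob(col_data, {2}, at::kLong).clone();
      at::Tensor vals = at::ones({2, 2, 2});  // two 2x2 blocks
      at::Tensor bsr = at::sparse_bsr_tensor(crow, col, vals, {4, 4},
                                             at::TensorOptions().dtype(at::kFloat));
      return 0;
    }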
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sparse_resize_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sparse_resize_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f2687ab41410b1071e1a37f3f276b2f3f05afe98
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sparse_resize_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor sparse_resize(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
+TORCH_API const at::Tensor & sparse_resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
+TORCH_API const at::Tensor & sparse_resize_outf(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_entr_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_entr_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e214cc11feb2f6932499aad20a8fac917f01442
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_entr_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/special_entr_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_special_entr_out : public at::meta::structured_special_entr {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_round_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_round_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a4e46e0ef203916d9aaa357d3cd7f6b7bc560f2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_round_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_round {
+  using schema = at::Tensor (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_round")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_round(Tensor self, *, int decimals=0) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t decimals);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals);
+};
+
+struct TORCH_API special_round_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_round")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t decimals, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..79d6ca661168ecc22559ff5cbada3250710cafb4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_scaled_modified_bessel_k1(const at::Tensor & x);
+TORCH_API at::Tensor & special_scaled_modified_bessel_k1_out(at::Tensor & out, const at::Tensor & x);
+TORCH_API at::Tensor & special_scaled_modified_bessel_k1_outf(const at::Tensor & x, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
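The at::meta:: declarations above are the Meta-dispatch-key entry points: run against tensors on the meta device, they propagate shapes and dtypes without executing any kernel math. A hedged sketch (assumes a build where the meta kernel is registered; the sizes are illustrative):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      // A meta tensor carries metadata only; no storage is allocated.
      at::Tensor x = at::empty({128, 64}, at::TensorOptions().device(at::kMeta));
      at::Tensor y = at::special_scaled_modified_bessel_k1(x);
      std::cout << y.sizes() << "\n";  // [128, 64], still on the meta device
      return 0;
    }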
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sqrt_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sqrt_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..de5d530a8cf30e1c82665705ee5d25de1647bc4b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sqrt_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor sqrt(const at::Tensor & self);
+TORCH_API at::Tensor & sqrt_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & sqrt_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & sqrt_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
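The at::cpu:: declarations above are direct entry points into the CPU kernels, bypassing the dispatcher; the caller must guarantee CPU tensors. A sketch (using ATen/CPUFunctions.h as the aggregate header is an assumption of this example):

    #include <ATen/ATen.h>
    #include <ATen/CPUFunctions.h>

    int main() {
      at::Tensor t = at::rand({3});        // CPU float tensor
      at::Tensor r = at::cpu::sqrt(t);     // functional variant
      at::Tensor out = at::empty_like(t);
      at::cpu::sqrt_out(out, t);           // out= variant (out comes first)
      at::cpu::sqrt_(t);                   // in-place variant
      return 0;
    }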
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sym_stride_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sym_stride_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae9fac9b5e5e987624937a712bab6f9060eaf639
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/sym_stride_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API c10::SymInt sym_stride(const at::Tensor & self, int64_t dim);
+} // namespace native
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/thnn_conv2d_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/thnn_conv2d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff20733445d05424ae492bab0c23ee84bcb095f8
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/thnn_conv2d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0);
+TORCH_API at::Tensor thnn_conv2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0));
+TORCH_API at::Tensor & thnn_conv2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0);
+TORCH_API at::Tensor & thnn_conv2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out);
+TORCH_API at::Tensor & thnn_conv2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0));
+TORCH_API at::Tensor & thnn_conv2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/uniform_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/uniform_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..068a76f750766227bc061c5eb24e3182a8f28ecf
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/uniform_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API uniform_ {
+  using schema = at::Tensor & (at::Tensor &, double, double, ::std::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::uniform_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator);
+};
+
+struct TORCH_API uniform_out {
+  using schema = at::Tensor & (const at::Tensor &, double, double, ::std::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::uniform")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator, at::Tensor & out);
+};
+
+struct TORCH_API uniform {
+  using schema = at::Tensor (const at::Tensor &, double, double, ::std::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::uniform")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator);
+};
+
+}} // namespace at::_ops
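The three structs above are the in-place, out=, and functional spellings of one RNG operator; the Tensor::uniform_ method lowers to the first of them. A short sketch (the tensor and range values are illustrative):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor self = at::empty({5});
      self.uniform_(0.0, 1.0);  // method API, default generator
      // Equivalent route through the generated schema struct:
      at::_ops::uniform_::call(self, /*from=*/0.0, /*to=*/1.0, ::std::nullopt);
      return 0;
    }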
diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_linear1d_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_linear1d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..3587f4323e5fa13ac3232f8674dfc7c09a758d21
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_linear1d_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+
+
+
+#include <ATen/ops/upsample_linear1d_backward_ops.h>
+
+namespace at {
+
+
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales=::std::nullopt) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales=::std::nullopt) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
+  }
+}
+
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
+  }
+}
+
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales=::std::nullopt) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales=::std::nullopt) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
+  }
+}
+
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_linear1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales, at::Tensor & grad_input) {
+    return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
+  }
+}
+
+// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
+inline at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales=::std::nullopt) {
+    return at::_ops::upsample_linear1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, ::std::optional<double> scales=::std::nullopt) {
+    return at::_ops::upsample_linear1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales);
+  }
+}
+
+// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
+inline at::Tensor upsample_linear1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales=::std::nullopt) {
+    return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional<double> scales=::std::nullopt) {
+    return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
+  }
+}
+
+}
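On the at::symint:: templates above: the explicit template argument selects between the IntArrayRef and SymIntArrayRef signatures that share one wrapper name. A sketch with concrete sizes (the shapes are illustrative; gradient of a 1-D linear upsample from length 4 to length 8):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor grad_output = at::randn({2, 3, 8});  // (N, C, output length)
      at::Tensor grad_input = at::symint::upsample_linear1d_backward<int64_t>(
          grad_output, /*output_size=*/{8}, /*input_size=*/{2, 3, 4},
          /*align_corners=*/false, /*scales=*/::std::nullopt);
      // Instantiating with <c10::SymInt> selects the SymIntArrayRef overload instead.
      return 0;
    }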