diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_backward_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8899e306a53bae4a653ecd9a8ba13120c70f2e1f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API void _backward(const at::Tensor & self, at::TensorList inputs, const ::std::optional & gradient={}, ::std::optional retain_graph=::std::nullopt, bool create_graph=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_batch_norm_no_update.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_batch_norm_no_update.h new file mode 100644 index 0000000000000000000000000000000000000000..cf1d3f086a11b8f3c32104cea461a831bff454a8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_batch_norm_no_update.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_batch_norm_no_update(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor) +inline ::std::tuple _batch_norm_no_update(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, double momentum, double eps) { + return at::_ops::_batch_norm_no_update::call(input, weight, bias, running_mean, running_var, momentum, eps); +} + +// aten::_batch_norm_no_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) +inline ::std::tuple _batch_norm_no_update_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, double momentum, double eps) { + return at::_ops::_batch_norm_no_update_out::call(input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2, out3); +} +// aten::_batch_norm_no_update.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) +inline ::std::tuple _batch_norm_no_update_outf(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { + return at::_ops::_batch_norm_no_update_out::call(input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2, out3); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_batch_norm_no_update_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_batch_norm_no_update_native.h new file mode 100644 index 0000000000000000000000000000000000000000..45c676ff3e0f87d816ef8c7a7cef80cb2447679e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_batch_norm_no_update_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple _batch_norm_no_update(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, double momentum, double eps); +TORCH_API ::std::tuple _batch_norm_no_update_out(const at::Tensor & input, const ::std::optional & weight, const ::std::optional & bias, const ::std::optional & running_mean, const ::std::optional & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Float_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Float_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1dbb2b7e04a08b9024f082bd9a6b41210e2c867b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cast_Float_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _cast_Float(const at::Tensor & self, bool non_blocking=false); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_coalesced_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_coalesced_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..374b8a335b0503f5d0c235c173207bfae666b5c4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_coalesced_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor _coalesced(const at::Tensor & self, bool coalesced); +TORCH_API at::Tensor & _coalesced_out(at::Tensor & out, const at::Tensor & self, bool coalesced); +TORCH_API at::Tensor & _coalesced_outf(const at::Tensor & self, bool coalesced, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_physical_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_physical_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9a7db8a44d2501cceec25333fd1adc16226526d2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conj_physical_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _conj_physical { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_conj_physical") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_conj_physical(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API _conj_physical_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_conj_physical") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conv_depthwise2d_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conv_depthwise2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..077d26c14b369513f9d9bf76e6f53ab69cfd5bf2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_conv_depthwise2d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _conv_depthwise2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation); +TORCH_API at::Tensor _conv_depthwise2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); +TORCH_API const at::Tensor & _conv_depthwise2d_out(const at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation); +TORCH_API const at::Tensor & _conv_depthwise2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, const at::Tensor & out); +TORCH_API const at::Tensor & _conv_depthwise2d_symint_out(const at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation); +TORCH_API const at::Tensor & _conv_depthwise2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, const at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cslt_sparse_mm.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cslt_sparse_mm.h new file mode 100644 index 0000000000000000000000000000000000000000..c974fc0f1e54b5fb61b4e49dd6bcacf5c2632488 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_cslt_sparse_mm.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_cslt_sparse_mm(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? 
out_dtype=None, bool transpose_result=False, int alg_id=0) -> Tensor +inline at::Tensor _cslt_sparse_mm(const at::Tensor & compressed_A, const at::Tensor & dense_B, const ::std::optional & bias={}, const ::std::optional & alpha={}, ::std::optional out_dtype=::std::nullopt, bool transpose_result=false, int64_t alg_id=0) { + return at::_ops::_cslt_sparse_mm::call(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_exp_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_exp_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3336692ddb41f4dbb6532842233f09e890c9b86f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_foreach_exp_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::vector _foreach_exp(at::TensorList self); +TORCH_API void _foreach_exp_(at::TensorList self); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_histogramdd_bin_edges_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_histogramdd_bin_edges_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7fe83cd13329e3f40fcaa95923b79d5313f15397 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_histogramdd_bin_edges_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _histogramdd_bin_edges { + using schema = ::std::vector (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_histogramdd_bin_edges") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? 
weight=None, bool density=False) -> Tensor[]") + static ::std::vector call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range, const ::std::optional & weight, bool density); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range, const ::std::optional & weight, bool density); +}; + +struct TORCH_API _histogramdd_bin_edges_out { + using schema = void (const at::Tensor &, at::IntArrayRef, ::std::optional>, const ::std::optional &, bool, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_histogramdd_bin_edges") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()") + static void call(const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range, const ::std::optional & weight, bool density, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, ::std::optional> range, const ::std::optional & weight, bool density, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_make_dual_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_make_dual_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..188e5b4769cc53829bf3e13f85ee2ee51dd1fb55 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_make_dual_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor _make_dual(const at::Tensor & primal, const at::Tensor & tangent, int64_t level); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_native_multi_head_attention_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_native_multi_head_attention_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d2db1b5fd1e495a9b3fa81423baa01846717c3dd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_native_multi_head_attention_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple _native_multi_head_attention_out(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional & mask, bool need_weights, bool average_attn_weights, ::std::optional mask_type, at::Tensor & out0, at::Tensor & out1); +TORCH_API ::std::tuple native_multi_head_attention_cpu(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional & mask={}, bool need_weights=true, bool average_attn_weights=true, ::std::optional mask_type=::std::nullopt); +TORCH_API ::std::tuple native_multi_head_attention_cuda(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional & mask={}, bool need_weights=true, bool average_attn_weights=true, ::std::optional mask_type=::std::nullopt); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_nnpack_available_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_nnpack_available_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3ae7647b1c015d690abd08f14e9e517f0e821676 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_nnpack_available_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API bool _nnpack_available(); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d5e60b7ffb3ce4d6995b796085283fb862a4e780 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes 
that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..b78d84e0ba46eab0ea7ad14a0416d98d079f6cbd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor) +inline ::std::tuple _scaled_dot_product_efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array grad_input_mask, bool is_causal=false, ::std::optional scale=::std::nullopt) { + return at::_ops::_scaled_dot_product_efficient_attention_backward::call(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5b58e45b49184badfc26ac658282ac17d120f57f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_segment_reduce_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _segment_reduce_backward(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const ::std::optional & lengths={}, const ::std::optional & offsets={}, int64_t axis=0, const ::std::optional & initial=::std::nullopt); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..98da0bbcf379d819f884e34ef483207ef8613eda --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}); +TORCH_API at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_thnn_differentiable_lstm_cell_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_thnn_differentiable_lstm_cell_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..476c3fbd665fcfa4e5517264150d8f1b1de7f6cc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_thnn_differentiable_lstm_cell_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _thnn_differentiable_lstm_cell_backward { + using schema = ::std::tuple (const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &, const ::std::optional &, const ::std::optional &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_thnn_differentiable_lstm_cell_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? 
grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)") + static ::std::tuple call(const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const ::std::optional & input_bias, const ::std::optional & hidden_bias, const at::Tensor & cx, const at::Tensor & cy); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const ::std::optional & input_bias, const ::std::optional & hidden_bias, const at::Tensor & cx, const at::Tensor & cy); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..6afaebe9c1a4e933e7f8102684e50112a4ba4568 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor) +inline ::std::tuple _thnn_fused_lstm_cell_backward_impl(const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) { + return at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy, grad_cy, cx, cy, workspace, has_bias); +} + +// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple _thnn_fused_lstm_cell_backward_impl_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) { + return at::_ops::_thnn_fused_lstm_cell_backward_impl_out::call(grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2); +} +// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +inline ::std::tuple _thnn_fused_lstm_cell_backward_impl_outf(const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_thnn_fused_lstm_cell_backward_impl_out::call(grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2f808ca8f2f80dc3cbdfc5ef80e7f27eb7cc3296 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_to_sparse_semi_structured_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _to_sparse_semi_structured { + using schema = ::std::tuple (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_to_sparse_semi_structured") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_to_sparse_semi_structured(Tensor dense) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & dense); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dense); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_values_copy_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_values_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5ea7e87e7f3238853b048c222fcd8164ebbc6dc4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/_values_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _values_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_values_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_values_copy(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API _values_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_values_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addmv_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addmv_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0868238aa41a2b2d6d4b71beb24ec110387547c3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addmv_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addmv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addmv_outf(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addr_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addr_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a7a60d99edf9ccafcaa9a1cb61a2f52afc0f1f1a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/addr_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addr_outf(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/as_strided.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/as_strided.h new file mode 100644 index 0000000000000000000000000000000000000000..5e9f2fa48a701404bd22789361e971fdc354ddbd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/as_strided.h @@ -0,0 +1,69 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +inline at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional storage_offset=::std::nullopt) { + return at::_ops::as_strided::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? 
::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt); +} +namespace symint { + template ::value>> + at::Tensor as_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional storage_offset=::std::nullopt) { + return at::_ops::as_strided::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt); + } +} + +// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +inline at::Tensor as_strided_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional storage_offset=::std::nullopt) { + return at::_ops::as_strided::call(self, size, stride, storage_offset); +} +namespace symint { + template ::value>> + at::Tensor as_strided(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional storage_offset=::std::nullopt) { + return at::_ops::as_strided::call(self, size, stride, storage_offset); + } +} + +// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) +inline const at::Tensor & as_strided_(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional storage_offset=::std::nullopt) { + return at::_ops::as_strided_::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt); +} +namespace symint { + template ::value>> + const at::Tensor & as_strided_(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional storage_offset=::std::nullopt) { + return at::_ops::as_strided_::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? ::std::make_optional(c10::SymInt(*storage_offset)) : ::std::nullopt); + } +} + +// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) +inline const at::Tensor & as_strided__symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional storage_offset=::std::nullopt) { + return at::_ops::as_strided_::call(self, size, stride, storage_offset); +} +namespace symint { + template ::value>> + const at::Tensor & as_strided_(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional storage_offset=::std::nullopt) { + return at::_ops::as_strided_::call(self, size, stride, storage_offset); + } +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/batch_norm_backward.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/batch_norm_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..582bae27aebf50a594113224764e9eeffe22b299 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/batch_norm_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::batch_norm_backward(Tensor grad_out, Tensor input, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_var, bool update, float eps, bool[3] output_mask, Tensor reserve) -> (Tensor, Tensor, Tensor) +inline ::std::tuple batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_var, bool update, double eps, ::std::array output_mask, const at::Tensor & reserve) { + return at::_ops::batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_left_shift_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_left_shift_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a932f8ef0efeb98ec395ba59ced02b0c00b726fc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/bitwise_left_shift_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor bitwise_left_shift(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_left_shift_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & bitwise_left_shift_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & bitwise_left_shift_(at::Tensor & self, const at::Tensor & other); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cholesky_inverse_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cholesky_inverse_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3ba70016c469b12991661ad45e5a067fe3dbecd6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cholesky_inverse_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor cholesky_inverse(const at::Tensor & self, bool upper=false); +TORCH_API at::Tensor & cholesky_inverse_out(const at::Tensor & self, bool upper, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/clamp_max_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/clamp_max_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8249f6b9417781cfba82b290c67a7788d17e9d50 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/clamp_max_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API clamp_max { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max(Tensor self, Scalar max) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & max); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max); +}; + +struct TORCH_API clamp_max_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max.Tensor(Tensor self, Tensor max) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & max); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max); +}; + +struct TORCH_API clamp_max_ { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & max); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & max); +}; + +struct TORCH_API clamp_max__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & max); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & max); +}; + +struct TORCH_API clamp_max_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & max, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max, at::Tensor & out); +}; + +struct TORCH_API clamp_max_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::clamp_max") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & max, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_tbc.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_tbc.h new file mode 100644 index 0000000000000000000000000000000000000000..4cbc5a15f1ce2fc6c583db083b36417ca75db962 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/conv_tbc.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor +inline at::Tensor conv_tbc(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) { + return at::_ops::conv_tbc::call(self, weight, bias, pad); +} + +// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & conv_tbc_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) { + return at::_ops::conv_tbc_out::call(self, weight, bias, pad, out); +} +// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & conv_tbc_outf(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) { + return at::_ops::conv_tbc_out::call(self, weight, bias, pad, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/convolution_overrideable.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/convolution_overrideable.h new file mode 100644 index 0000000000000000000000000000000000000000..f6cb31c73fafb0c429f9a23c477c4e7fc52f8acc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/convolution_overrideable.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? 
bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor +inline at::Tensor convolution_overrideable(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) { + return at::_ops::convolution_overrideable::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups); +} +namespace symint { + template ::value>> + at::Tensor convolution_overrideable(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) { + return at::_ops::convolution_overrideable::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups); + } +} + +// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor +inline at::Tensor convolution_overrideable_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) { + return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); +} +namespace symint { + template ::value>> + at::Tensor convolution_overrideable(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) { + return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); + } +} + +// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & convolution_overrideable_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) { + return at::_ops::convolution_overrideable_out::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out); +} +namespace symint { + template ::value>> + at::Tensor & convolution_overrideable_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) { + return at::_ops::convolution_overrideable_out::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out); + } +} + +// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & convolution_overrideable_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) { + return at::_ops::convolution_overrideable_out::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out); +} +namespace symint { + template ::value>> + at::Tensor & convolution_overrideable_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) { + return at::_ops::convolution_overrideable_out::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), transposed, c10::fromIntArrayRefSlow(output_padding), groups, out); + } +} + +// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & convolution_overrideable_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) { + return at::_ops::convolution_overrideable_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); +} +namespace symint { + template ::value>> + at::Tensor & convolution_overrideable_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) { + return at::_ops::convolution_overrideable_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); + } +} + +// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & convolution_overrideable_symint_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) { + return at::_ops::convolution_overrideable_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); +} +namespace symint { + template ::value>> + at::Tensor & convolution_overrideable_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, at::Tensor & out) { + return at::_ops::convolution_overrideable_out::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); + } +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumprod_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumprod_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..377d8ba7910632400e3a2a5f00a8dae4ac99748f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/cumprod_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API cumprod { + using schema = at::Tensor (const at::Tensor &, int64_t, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumprod") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumprod(Tensor self, int dim, *, ScalarType? 
dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, ::std::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional dtype); +}; + +struct TORCH_API cumprod_ { + using schema = at::Tensor & (at::Tensor &, int64_t, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumprod_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, int64_t dim, ::std::optional dtype); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, ::std::optional dtype); +}; + +struct TORCH_API cumprod_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumprod") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, ::std::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional dtype, at::Tensor & out); +}; + +struct TORCH_API cumprod_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumprod") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, ::std::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional dtype); +}; + +struct TORCH_API cumprod__dimname { + using schema = at::Tensor & (at::Tensor &, at::Dimname, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumprod_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? 
dtype=None) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, at::Dimname dim, ::std::optional dtype); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, ::std::optional dtype); +}; + +struct TORCH_API cumprod_dimname_out { + using schema = at::Tensor & (const at::Tensor &, at::Dimname, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumprod") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Dimname dim, ::std::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional dtype, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_ifft_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_ifft_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a603e3a7edcdd86873dbf59fd7c8f8d70f1ed22d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_ifft_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_ifft_symint(const at::Tensor & self, ::std::optional n=::std::nullopt, int64_t dim=-1, ::std::optional norm=::std::nullopt); +TORCH_API at::Tensor & fft_ifft_symint_out(const at::Tensor & self, ::std::optional n, int64_t dim, ::std::optional norm, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_irfft2_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_irfft2_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1e3d688192213860d7481d43251e841342c5b15d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/fft_irfft2_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fft_irfft2 { + using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_irfft2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm); +}; + +struct TORCH_API fft_irfft2_out { + using schema = at::Tensor & (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_irfft2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gather_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gather_native.h new file mode 100644 index 0000000000000000000000000000000000000000..51c768ff7b71a6a2b2fa81f8980367e9b8190aa2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gather_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_gather_out : public at::meta::structured_gather { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, const at::Tensor & out); +}; +TORCH_API at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_out(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/glu_meta_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/glu_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dadf8681c1931693a99c1c95ef8be95981a724d5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/glu_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
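Usage sketch for the gather / fft_ifft native declarations above, reached through their public at:: wrappers (illustrative only, not part of any generated header; assumes a standard libtorch build):
    #include <ATen/ATen.h>
    at::Tensor src = at::arange(6, at::kFloat).reshape({2, 3});
    at::Tensor idx = at::zeros({1, 3}, at::kLong);
    at::Tensor picked = at::gather(src, /*dim=*/0, idx);   // gather rows along dim 0 via the index tensor
    at::Tensor spec = at::rand({4, 8}, at::kComplexFloat);
    at::Tensor sig  = at::fft_ifft(spec);                  // inverse FFT over the last dim; n and norm left at defaults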
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor glu(const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim=-1); +TORCH_API at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gradient_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gradient_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5824553346d1bbb91e526983e02c21dbb1256d17 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/gradient_compositeimplicitautograd_dispatch.h @@ -0,0 +1,29 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, const ::std::optional<at::Scalar> & spacing=::std::nullopt, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, ::std::optional<int64_t> dim=::std::nullopt, int64_t edge_order=1); +TORCH_API ::std::vector<at::Tensor> gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order=1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cuda_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ddedaf4cb1de8e242b8313803308e3c73eb75a42 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/hardsigmoid_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
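Usage sketch for the at::gradient overloads declared above (illustrative only, not part of the generated header; assumes a standard libtorch build):
    #include <ATen/ATen.h>
    at::Tensor t = at::rand({5, 5});
    auto g_all = at::gradient(t);                        // unit spacing, gradient along every dimension
    auto g_d1  = at::gradient(t, at::IntArrayRef{1});    // restrict to dim 1 via the IntArrayRef overload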
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & hardsigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a4e7e3a6ae918f437a65a7efa32ebca8ad052dd9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isreal(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..040e66f3f0d52ff320abf6dd69324e76172f1dc3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/linalg_vector_norm_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_vector_norm { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_vector_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype); +}; + +struct TORCH_API linalg_vector_norm_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_vector_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional dtype, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log1p_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log1p_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..caa9a8c54f7256eaf657580873e6e7c3b6a058b7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/log1p_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log1p { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log1p") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log1p(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API log1p_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log1p_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log1p_(Tensor(a!) 
self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API log1p_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log1p") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2d587f1a4a38ad074d5e84659e00e39d9db55b83 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/logical_xor_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
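Usage sketch for the aten::log1p operator stubs above (illustrative only, not part of the generated header; assumes a standard libtorch build):
    #include <ATen/ATen.h>
    at::Tensor x = at::rand({3});
    at::Tensor y = at::log1p(x);            // functional form: log(1 + x)
    x.log1p_();                             // in-place variant (aten::log1p_)
    at::Tensor out = at::empty_like(x);
    at::log1p_out(out, x);                  // out= variant writing into a preallocated tensor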
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor logical_xor(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & logical_xor_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lstm_mps_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lstm_mps_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d71fb6b03d19e081867cd074b18cb47ee363e0a4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/lstm_mps_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API void lstm_mps_backward_out(const ::std::optional & grad_y, const ::std::optional & grad_hy, const ::std::optional & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/median_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/median_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5e22f2ee64d953a68b442a3fc2129edad574f5ea --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/median_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
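Usage sketch for the logical_xor dispatch entries above (illustrative only, not part of the generated header; assumes a standard libtorch build):
    #include <ATen/ATen.h>
    at::Tensor a = at::zeros({3}, at::kBool);
    at::Tensor b = at::ones({3}, at::kBool);
    at::Tensor c = at::logical_xor(a, b);   // element-wise XOR
    a.logical_xor_(b);                      // in-place variant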
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple median(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple median_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple median_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_batch_norm_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_batch_norm_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..77a27ead3adf1657ee8ca09a171c6408f1e29a40 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_batch_norm_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple miopen_batch_norm_backward_out(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +TORCH_API ::std::tuple miopen_batch_norm_backward(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const ::std::optional & running_mean, const ::std::optional & running_var, const ::std::optional & save_mean, const ::std::optional & save_var, double epsilon); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_rnn_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_rnn_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..060c6e5e8a7705505b9e7ec8f87dd05009aedbde --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/miopen_rnn_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API void miopen_rnn_backward_out(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional & cx, const at::Tensor & output, const ::std::optional & grad_output, const ::std::optional & grad_hy, const ::std::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3); +TORCH_API ::std::tuple> miopen_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const ::std::optional & cx, const at::Tensor & output, const ::std::optional & grad_output, const ::std::optional & grad_hy, const ::std::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double 
dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const ::std::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_linear_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_linear_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1a7a4478bacc2db07e9c7cb5043ae8e6498a4b00 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mkldnn_linear_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API mkldnn_linear { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_linear") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias); +}; + +struct TORCH_API mkldnn_linear_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_linear") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const ::std::optional & bias, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mul_meta.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mul_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..a0a16b99e0d073ce32fc3df896f0d931d3dfba1b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/mul_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_mul_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/multiply_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/multiply_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2a680b508fb96f9eda4f55874bf5e0b812be850e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/multiply_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor multiply(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & multiply_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & multiply_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor multiply(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & multiply_(at::Tensor & self, const at::Scalar & other); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_dropout_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_dropout_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..14333ee0cbf4cf09e98c1b6eee9f54ae51d0d491 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/native_dropout_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API native_dropout { + using schema = ::std::tuple (const at::Tensor &, double, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_dropout") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_dropout(Tensor input, float p, bool? 
train) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & input, double p, ::std::optional train); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, ::std::optional train); +}; + +struct TORCH_API native_dropout_out { + using schema = ::std::tuple (const at::Tensor &, double, ::std::optional, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_dropout") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & input, double p, ::std::optional train, at::Tensor & out0, at::Tensor & out1); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, ::std::optional train, at::Tensor & out0, at::Tensor & out1); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nonzero_static_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nonzero_static_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9690c07494ae12e7afdad1cc7de61e9c09d1ec3b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/nonzero_static_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor nonzero_static(const at::Tensor & self, int64_t size, int64_t fill_value=-1); +TORCH_API at::Tensor & nonzero_static_out(at::Tensor & out, const at::Tensor & self, int64_t size, int64_t fill_value=-1); +TORCH_API at::Tensor & nonzero_static_outf(const at::Tensor & self, int64_t size, int64_t fill_value, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/normal.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/normal.h new file mode 100644 index 0000000000000000000000000000000000000000..e31697d019c908ca3fe44ee81baae0f3fa6d4953 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/normal.h @@ -0,0 +1,169 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor +inline at::Tensor normal_functional(const at::Tensor & self, double mean=0, double std=1, ::std::optional generator=::std::nullopt) { + return at::_ops::normal_functional::call(self, mean, std, generator); +} + +// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, double std=1, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::normal_Tensor_float_out::call(mean, std, generator, out); +} +// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & normal_outf(const at::Tensor & mean, double std, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::normal_Tensor_float_out::call(mean, std, generator, out); +} + +// aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor +inline at::Tensor normal(const at::Tensor & mean, double std=1, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::normal_Tensor_float::call(mean, std, generator); +} + +// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & normal_out(at::Tensor & out, double mean, const at::Tensor & std, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::normal_float_Tensor_out::call(mean, std, generator, out); +} +// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & normal_outf(double mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::normal_float_Tensor_out::call(mean, std, generator, out); +} + +// aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor +inline at::Tensor normal(double mean, const at::Tensor & std, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::normal_float_Tensor::call(mean, std, generator); +} + +// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::normal_Tensor_Tensor_out::call(mean, std, generator, out); +} +// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & normal_outf(const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::normal_Tensor_Tensor_out::call(mean, std, generator, out); +} + +// aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor +inline at::Tensor normal(const at::Tensor & mean, const at::Tensor & std, ::std::optional<at::Generator> generator=::std::nullopt) { + return at::_ops::normal_Tensor_Tensor::call(mean, std, generator); +} + +// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor +inline at::Tensor normal(double mean, double std, at::IntArrayRef size, ::std::optional generator=::std::nullopt, at::TensorOptions options={}) { + return at::_ops::normal_float_float::call(mean, std, c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor normal(double mean, double std, at::IntArrayRef size, ::std::optional generator=::std::nullopt, at::TensorOptions options={}) { + return at::_ops::normal_float_float::call(mean, std, c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor normal(double mean, double std, at::IntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::normal_float_float::call(mean, std, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor normal(double mean, double std, at::IntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::normal_float_float::call(mean, std, c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); + } +} + +// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, ::std::optional generator=::std::nullopt, at::TensorOptions options={}) { + return at::_ops::normal_float_float::call(mean, std, size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor normal(double mean, double std, c10::SymIntArrayRef size, ::std::optional generator=::std::nullopt, at::TensorOptions options={}) { + return at::_ops::normal_float_float::call(mean, std, size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::normal_float_float::call(mean, std, size, generator, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor normal(double mean, double std, c10::SymIntArrayRef size, ::std::optional generator, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) { + return at::_ops::normal_float_float::call(mean, std, size, generator, dtype, layout, device, pin_memory); + } +} + +// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & normal_out(at::Tensor & out, double mean, double std, at::IntArrayRef size, ::std::optional generator=::std::nullopt) { + return at::_ops::normal_float_float_out::call(mean, std, c10::fromIntArrayRefSlow(size), generator, out); +} +namespace symint { + template ::value>> + at::Tensor & normal_out(at::Tensor & out, double mean, double std, at::IntArrayRef size, ::std::optional generator=::std::nullopt) { + return at::_ops::normal_float_float_out::call(mean, std, c10::fromIntArrayRefSlow(size), generator, out); + } +} + +// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & normal_outf(double mean, double std, at::IntArrayRef size, ::std::optional generator, at::Tensor & out) { + return at::_ops::normal_float_float_out::call(mean, std, c10::fromIntArrayRefSlow(size), generator, out); +} +namespace symint { + template ::value>> + at::Tensor & normal_outf(double mean, double std, at::IntArrayRef size, ::std::optional generator, at::Tensor & out) { + return at::_ops::normal_float_float_out::call(mean, std, c10::fromIntArrayRefSlow(size), generator, out); + } +} + +// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & normal_symint_out(at::Tensor & out, double mean, double std, c10::SymIntArrayRef size, ::std::optional generator=::std::nullopt) { + return at::_ops::normal_float_float_out::call(mean, std, size, generator, out); +} +namespace symint { + template ::value>> + at::Tensor & normal_out(at::Tensor & out, double mean, double std, c10::SymIntArrayRef size, ::std::optional generator=::std::nullopt) { + return at::_ops::normal_float_float_out::call(mean, std, size, generator, out); + } +} + +// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & normal_symint_outf(double mean, double std, c10::SymIntArrayRef size, ::std::optional generator, at::Tensor & out) { + return at::_ops::normal_float_float_out::call(mean, std, size, generator, out); +} +namespace symint { + template ::value>> + at::Tensor & normal_outf(double mean, double std, c10::SymIntArrayRef size, ::std::optional generator, at::Tensor & out) { + return at::_ops::normal_float_float_out::call(mean, std, size, generator, out); + } +} + +// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & normal_out(at::Tensor & out, const at::Tensor & self, double mean=0, double std=1, ::std::optional generator=::std::nullopt) { + return at::_ops::normal_out::call(self, mean, std, generator, out); +} +// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & normal_outf(const at::Tensor & self, double mean, double std, ::std::optional generator, at::Tensor & out) { + return at::_ops::normal_out::call(self, mean, std, generator, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/permute_copy_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/permute_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..69fae58198b17062f916b36cac3e0f2ab7934de9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/permute_copy_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & permute_copy_out(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out); +TORCH_API at::Tensor permute_copy(const at::Tensor & self, at::IntArrayRef dims); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/polygamma.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/polygamma.h new file mode 100644 index 0000000000000000000000000000000000000000..94f409788e70e0159680b985fd412cdd6e395598 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/polygamma.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & polygamma_out(at::Tensor & out, int64_t n, const at::Tensor & self) { + return at::_ops::polygamma_out::call(n, self, out); +} +// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & polygamma_outf(int64_t n, const at::Tensor & self, at::Tensor & out) { + return at::_ops::polygamma_out::call(n, self, out); +} + +// aten::polygamma(int n, Tensor self) -> Tensor +inline at::Tensor polygamma(int64_t n, const at::Tensor & self) { + return at::_ops::polygamma::call(n, self); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/prod_cpu_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/prod_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..836707acf2992d7c2e2038e891327f67e8f40178 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/prod_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
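Usage sketch for the at::normal overloads declared in the normal.h block above (illustrative only, not part of the generated header; assumes a standard libtorch build):
    #include <ATen/ATen.h>
    at::Tensor n0 = at::normal(/*mean=*/0.0, /*std=*/1.0, /*size=*/{2, 3});   // float_float overload, default generator/options
    at::Tensor mean = at::zeros({4});
    at::Tensor stdv = at::ones({4});
    at::Tensor out  = at::empty({4});
    at::normal_out(out, mean, stdv);                                          // Tensor_Tensor out= overload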
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor prod(const at::Tensor & self, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim=false, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & prod_outf(const at::Tensor & self, int64_t dim, bool keepdim, ::std::optional dtype, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantized_max_pool3d.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantized_max_pool3d.h new file mode 100644 index 0000000000000000000000000000000000000000..1183728a866c9e4a452794776f0c0147d08f64e0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantized_max_pool3d.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::quantized_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor +inline at::Tensor quantized_max_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode); +} + +// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantized_max_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::quantized_max_pool3d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out); +} +// aten::quantized_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & quantized_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) { + return at::_ops::quantized_max_pool3d_out::call(self, kernel_size, stride, padding, dilation, ceil_mode, out); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0b8cc6c660d8c4a9c23fe20c0c75a77f559edeef --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/quantized_rnn_relu_cell_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
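Usage sketch for the prod CPU dispatch entries declared above (illustrative only, not part of the generated header; assumes a standard libtorch build):
    #include <ATen/ATen.h>
    at::Tensor m = at::rand({2, 4});
    at::Tensor p_all = at::prod(m);                                                       // product over all elements
    at::Tensor p_dim = at::prod(m, /*dim=*/1, /*keepdim=*/false, /*dtype=*/at::kDouble);  // reduce dim 1, accumulate in double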
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor quantized_rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/reflection_pad3d_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/reflection_pad3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ee92d8a790a1291c8da8edf14fdeac7871896bd7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/reflection_pad3d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API reflection_pad3d_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reflection_pad3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); +}; + +struct TORCH_API reflection_pad3d { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reflection_pad3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef padding); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rnn_tanh_cell.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rnn_tanh_cell.h new file mode 100644 index 0000000000000000000000000000000000000000..bd090f10e27db9d498d520721f8fc1ca11f1ea53 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rnn_tanh_cell.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor +inline at::Tensor rnn_tanh_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const ::std::optional & b_ih={}, const ::std::optional & b_hh={}) { + return at::_ops::rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh); +} + +} diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rrelu_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rrelu_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5e79ef907833fe5860ba9b255170d7e0548ffaea --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/rrelu_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor rrelu(const at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & rrelu_(at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, ::std::optional generator=::std::nullopt); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scalar_tensor_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scalar_tensor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..44b1cc93031851408f4066071d883cc144e42d64 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/scalar_tensor_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor scalar_tensor(const at::Scalar & s, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor & scalar_tensor_out(const at::Scalar & s, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softmax_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softmax_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a15f33792ff22eb0eddb732dc955697e1015c6a3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softmax_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, ::std::optional dtype=::std::nullopt); +TORCH_API at::Tensor & softmax_outf(const at::Tensor & self, int64_t dim, ::std::optional dtype, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus_backward_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b8504ae8fd310f61b58992c4ac5f5ff8f023f432 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor softplus_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus_backward_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9e64446b5fcc9bf660b0d1b3bc8fcef0dafdb87b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/softplus_backward_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_softplus_backward_out : public at::meta::structured_softplus_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i0_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i0_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3f6c636f356df60fdae86f3f6e033f6ddd20a2a0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i0_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor special_i0(const at::Tensor & self); +TORCH_API at::Tensor & special_i0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_i0_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i0_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i0_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7d97d53724285982e6bf90dbb3b1e6c98bf9515d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_i0_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_i0 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_i0") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_i0(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_i0_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_i0") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0207b570537da2f4761aa29146312be579cd3cc6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_modified_bessel_k0(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..397b8eebf6a5c193fe9edd8393c36566a4bc4bd6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_ndtri(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_native.h new file mode 100644 index 0000000000000000000000000000000000000000..31ef5d6fb71f772eadf1f2ef76b7b45859af8a06 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_special_spherical_bessel_j0_out : public at::meta::structured_special_spherical_bessel_j0 { +void impl(const at::Tensor & x, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_zeta_compositeexplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_zeta_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b7e1630d3dcd22d77d4426a14533266d64651539 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/special_zeta_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor special_zeta(const at::Scalar & self, const at::Tensor & other); +TORCH_API at::Tensor & special_zeta_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other); +TORCH_API at::Tensor & special_zeta_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor special_zeta(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & special_zeta_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & special_zeta_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/std_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/std_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ffe0540f707b01395f6fb44cbce0dab34bbd9e65 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/std_ops.h @@ -0,0 +1,116 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API std { + using schema = at::Tensor (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std(Tensor self, bool unbiased=True) -> Tensor") + static at::Tensor call(const at::Tensor & self, bool unbiased); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased); +}; + +struct TORCH_API std_dim { + using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim); +}; + +struct TORCH_API std_correction { + using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional & correction, bool keepdim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional & correction, bool keepdim); +}; + +struct TORCH_API std_out { + using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out); +}; + +struct TORCH_API std_correction_out { + using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, const ::std::optional &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional & correction, bool keepdim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional & correction, bool keepdim, at::Tensor & out); +}; + +struct TORCH_API std_names_dim { + using schema = at::Tensor (const at::Tensor &, at::DimnameList, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_dim") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim); +}; + +struct TORCH_API std_names_out { + using schema = at::Tensor & (const at::Tensor &, at::DimnameList, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out); +}; + +struct TORCH_API std_correction_names { + using schema = at::Tensor (const at::Tensor &, at::DimnameList, const ::std::optional &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction_names") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? 
correction=None, bool keepdim=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::DimnameList dim, const ::std::optional & correction, bool keepdim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional & correction, bool keepdim); +}; + +struct TORCH_API std_correction_names_out { + using schema = at::Tensor & (const at::Tensor &, at::DimnameList, const ::std::optional &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::std") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction_names_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::DimnameList dim, const ::std::optional & correction, bool keepdim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const ::std::optional & correction, bool keepdim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/take_along_dim_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/take_along_dim_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b7b4dcbd18c74ee71abc570b39c68411e29d7202 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/take_along_dim_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor take_along_dim(const at::Tensor & self, const at::Tensor & indices, ::std::optional dim=::std::nullopt); +TORCH_API at::Tensor & take_along_dim_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, ::std::optional dim=::std::nullopt); +TORCH_API at::Tensor & take_along_dim_outf(const at::Tensor & self, const at::Tensor & indices, ::std::optional dim, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/true_divide_compositeimplicitautograd_dispatch.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/true_divide_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3672187aeb576cf669fa4e8ceecd45a5dbec356f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/true_divide_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor true_divide(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & true_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & true_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & true_divide_(at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor true_divide(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & true_divide_(at::Tensor & self, const at::Scalar & other); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_ops.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f16695daa4540446ce047785e8c9165092a77a65 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API upsample_trilinear3d_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, ::std::optional, ::std::optional, ::std::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_trilinear3d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional scales_d, ::std::optional scales_h, ::std::optional scales_w, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional scales_d, ::std::optional scales_h, ::std::optional scales_w, at::Tensor & grad_input); +}; + +struct TORCH_API upsample_trilinear3d_backward { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, ::std::optional, ::std::optional, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_trilinear3d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional scales_d, ::std::optional scales_h, ::std::optional scales_w); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, ::std::optional scales_d, ::std::optional scales_h, ::std::optional scales_w); +}; + +}} // namespace at::_ops diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_native.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2e19524228faa48136974eb7f3ec64eac22a2b7a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/view_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor view(const at::Tensor & self, at::IntArrayRef size); +TORCH_API at::Tensor view_nested(const at::Tensor & self, at::IntArrayRef size); +TORCH_API at::Tensor mkldnn_view(const at::Tensor & self, at::IntArrayRef size); +TORCH_API at::Tensor view_dtype(const at::Tensor & self, at::ScalarType dtype); +} // namespace native +} // namespace at diff --git a/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xor.h b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xor.h new file mode 100644 index 0000000000000000000000000000000000000000..92074e596901388761d0de5a1f78a39d82d2744f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/torch/include/ATen/ops/xor.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor __xor__(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__xor___Scalar::call(self, other); +} + +// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor __xor__(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__xor___Tensor::call(self, other); +} + +}
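The Function.h-style headers added in this diff (for example rnn_tanh_cell.h and xor.h above) declare thin inline wrappers in namespace at that simply forward to the generated at::_ops::*::call entry points declared in the matching *_ops.h headers. As a minimal sketch, not part of the patch itself and assuming a standard libtorch installation is available to compile and link against, the two __xor__ overloads shown in xor.h can be exercised like this:

// sketch.cpp -- illustrative only; assumes libtorch headers and libraries are on the build path
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // __xor__ is defined for bool/integral tensors, so build integer inputs.
  at::Tensor a = at::full({2, 2}, 6, at::kLong);
  at::Tensor b = at::full({2, 2}, 3, at::kLong);

  // High-level wrapper declared in xor.h: at::__xor__(Tensor, Tensor).
  at::Tensor x = at::__xor__(a, b);

  // Equivalent call through the generated operator struct the wrapper forwards to.
  at::Tensor y = at::_ops::__xor___Tensor::call(a, b);

  std::cout << x << "\n" << y << std::endl;  // both tensors hold 5 (6 ^ 3)
  return 0;
}

The same pattern applies to the other generated wrappers in this diff: the inline function in the at namespace is the public C++ surface, while the struct in at::_ops carries the schema string plus call/redispatch entry points used by the dispatcher.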