diff --git a/.gitattributes b/.gitattributes
index c12cfbf38dd5d0a853165dde84b23f498ca29518..f8cc4ed3bcca8f673990ad10a32f6ec6d56ae7a0 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -396,3 +396,4 @@ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/
 my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSans.ttf filter=lfs diff=lfs merge=lfs -text
 my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio/resources/images/chelsea.png filter=lfs diff=lfs merge=lfs -text
 my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/lib/libtorchaudio_ffmpeg.so filter=lfs diff=lfs merge=lfs -text
+my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/backends/_backend_agg.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/backends/_backend_agg.cpython-38-x86_64-linux-gnu.so b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/backends/_backend_agg.cpython-38-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..bae2f0d89631774822bff207dfa1d85050d459bf
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/backends/_backend_agg.cpython-38-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ceab688ea8c9efb1ab297cbe891175ad60a0eb0e0ed4bc82e0b86a75c3b07c7d
+size 3779804
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_aminmax_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_aminmax_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..15729d14323b4571900353d61a9688ac8ace3c83
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_aminmax_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _aminmax(const at::Tensor & self);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _aminmax(const at::Tensor & self, int64_t dim, bool keepdim=false);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_autocast_to_full_precision_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_autocast_to_full_precision_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..880e75b76c9eebf2108e74f757e824dc88989b5e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_autocast_to_full_precision_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _autocast_to_full_precision {
+  using schema = at::Tensor (const at::Tensor &, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_autocast_to_full_precision")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool cuda_enabled, bool cpu_enabled);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_backward_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..08c97dad3fff673386c480057c109d30cfd39f27
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..616da5b43851a739e2fc7ab57c93e1e5ddb5bb0f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_convert_indices_from_csr_to_coo_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _convert_indices_from_csr_to_coo(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false);
+TORCH_API at::Tensor & _convert_indices_from_csr_to_coo_out(at::Tensor & out, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false);
+TORCH_API at::Tensor & _convert_indices_from_csr_to_coo_outf(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_max_size_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_max_size_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6222264b18363d19ce27a23250695ab26836a7b1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_max_size_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API int64_t _cufft_get_plan_cache_max_size(int64_t device_index);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_mul_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_mul_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..667b5c13383d051f2ab9744b8d61757b22dcba60
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_mul_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_mul(at::TensorList tensors, const at::Scalar & scalar);
+TORCH_API void _foreach_mul_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> _foreach_mul(at::TensorList tensors1, at::TensorList tensors2);
+TORCH_API void _foreach_mul_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> _foreach_mul(at::TensorList tensors, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_mul_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_norm_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_norm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bcc8cefeceab2fc89bcad1466aa88de68a013cc1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_norm_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_norm_Scalar {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_norm.Scalar(Tensor[] tensors, Scalar ord=2) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList tensors, const at::Scalar & ord);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Scalar & ord);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sqrt_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sqrt_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..61a73041867d0df11267c55b40b59b2da27c445f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sqrt_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_sqrt_functional(at::TensorList self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_make_dual.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_make_dual.h
new file mode 100644
index 0000000000000000000000000000000000000000..64e8e689aeae0fd19845eaae495e76ef0cdff855
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_make_dual.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_make_dual_ops.h>
+
+namespace at {
+
+
+// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
+TORCH_API inline at::Tensor _make_dual(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
+    return at::_ops::_make_dual::call(primal, tangent, level);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_nested_tensor_layer_norm_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_nested_tensor_layer_norm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d344c4033d8bbeffcad7ea64c40c8d15669769f6
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_nested_tensor_layer_norm_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor NestedTensor_layer_norm(const at::Tensor & self, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_remove_batch_dim_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_remove_batch_dim_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..4c38a770adb6a2c7ee6104d280b277118ff1bb31
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_remove_batch_dim_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _remove_batch_dim {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_remove_batch_dim")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b8bc6031e070475ba84058e94f1a28390c85910c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_slow_conv2d_forward_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_slow_conv2d_forward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f0d1d0ecb1938508a43b458feeb24cc337375411
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_slow_conv2d_forward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _slow_conv2d_forward_output {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_slow_conv2d_forward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "output")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output);
+};
+
+struct TORCH_API _slow_conv2d_forward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_slow_conv2d_forward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..398b469300e4f55386bc7ed002262365732c7910
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_log_softmax_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_log_softmax_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..514f85ff42d7d4c1c8c5b6f44bcf7365aa251a1c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_log_softmax_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_log_softmax_int {
+  using schema = at::Tensor (const at::Tensor &, int64_t, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_log_softmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int")
dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, c10::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype); +}; + +struct TORCH_API _sparse_log_softmax_Dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_log_softmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, c10::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional dtype); +}; + +struct TORCH_API _sparse_log_softmax { + using schema = at::Tensor (const at::Tensor &, int64_t, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_log_softmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, bool half_to_float); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_mm_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_mm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fd4348323955fef77fec57689616a7ee1b286db6 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_mm_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor _sparse_mm(const at::Tensor & sparse, const at::Tensor & dense); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_native.h new file mode 100644 index 0000000000000000000000000000000000000000..223e898566afaa543d757f71fef40963445afe19 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor transformer_encoder_layer_forward(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, 
+TORCH_API at::Tensor transformer_encoder_layer_forward(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={});
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_trilinear.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_trilinear.h
new file mode 100644
index 0000000000000000000000000000000000000000..177e9f590d5305e16e9188a0de2ab61d7b337084
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_trilinear.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_trilinear_ops.h>
+
+namespace at {
+
+
+// aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
+TORCH_API inline at::Tensor _trilinear(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim=1) {
+    return at::_ops::_trilinear::call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4dd3bfe5635f18a68b2a93117c1b0fb8fd2ad26
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API adaptive_max_pool3d_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::IntArrayRef, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices);
+};
+
+struct TORCH_API adaptive_max_pool3d {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef output_size);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addcdiv_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addcdiv_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cccb6190b5c1a61c1b8f0c38a87618f545ad83c7
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addcdiv_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcdiv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcdiv_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addmm_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addmm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..53ea7f2c1493046fd6ea779f5cd82a1cbc3575ba
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addmm_native.h
@@ -0,0 +1,37 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/addmm_meta.h>
+
+namespace at {
+namespace native {
+
+struct TORCH_API structured_addmm_out_cpu : public at::meta::structured_addmm {
+void impl(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, const at::Tensor & out);
+};
+struct TORCH_API structured_addmm_out_cuda : public at::meta::structured_addmm {
+void impl(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, const at::Tensor & out);
+};
+TORCH_API at::Tensor addmm_sparse_dense_cpu(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmm_out_sparse_dense_cpu(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & s_addmm_sparse_dense_cpu_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor addmm_sparse_dense_cuda(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmm_out_sparse_dense_cuda(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & s_addmm_sparse_dense_cuda_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor addmm_sparse_compressed_dense(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmm_out_sparse_compressed_cpu(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & addmm_out_sparse_compressed_cuda(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/affine_grid_generator_backward_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/affine_grid_generator_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd17b9bdb461c818138c96fc12d1735b632585af
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/affine_grid_generator_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor affine_grid_generator_backward(const at::Tensor & grad, at::IntArrayRef size, bool align_corners);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/any_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/any_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1c89b9aa8374aeb2c8d6ddaba291c8493ab26ec
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/any_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor any(const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+TORCH_API at::Tensor & any_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+TORCH_API at::Tensor & any_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/blackman_window_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/blackman_window_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1fc7bae7457abd8fa98602f90f9c172ec926dce5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/blackman_window_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor blackman_window(int64_t window_length, at::TensorOptions options={});
+TORCH_API at::Tensor blackman_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor blackman_window(int64_t window_length, bool periodic, at::TensorOptions options={});
+TORCH_API at::Tensor blackman_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conv_depthwise3d_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conv_depthwise3d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..40c8de6e607fd9efc483b064fa8fae39802895d6
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conv_depthwise3d_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor conv_depthwise3d_cuda(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cudnn_batch_norm_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cudnn_batch_norm_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba8000d5154050959c0bfa3a44ed9bf00ec09e2d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cudnn_batch_norm_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cumsum_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cumsum_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..52b7e7c702ae53368644fec448421dee1492e9db
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cumsum_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/cumsum_meta.h>
+
+namespace at {
+namespace native {
+
+struct TORCH_API structured_cumsum_out : public at::meta::structured_cumsum {
+void impl(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, const at::Tensor & out);
+};
+TORCH_API at::Tensor cumsum(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & cumsum_out(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor & cumsum_(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/deg2rad.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/deg2rad.h
new file mode 100644
index 0000000000000000000000000000000000000000..bdf070345ad76f48c294db75d140e5a74d3ba580
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/deg2rad.h
@@ -0,0 +1,45 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/deg2rad_ops.h>
+
+namespace at {
+
+
+// aten::deg2rad(Tensor self) -> Tensor
+TORCH_API inline at::Tensor deg2rad(const at::Tensor & self) {
+    return at::_ops::deg2rad::call(self);
+}
+
+// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
+TORCH_API inline at::Tensor & deg2rad_(at::Tensor & self) {
+    return at::_ops::deg2rad_::call(self);
+}
+
+// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & deg2rad_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::deg2rad_out::call(self, out);
+}
+
+// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & deg2rad_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::deg2rad_out::call(self, out);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ad57438b87e9c54a46b7f62cf445ba8cfa4b0979
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fft_hfft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_hfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_ifftshift_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_ifftshift_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc03b760505b86555f13a4daf5047e6b73a54ee4
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_ifftshift_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor fft_ifftshift(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/flatten_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/flatten_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..484326525aabb5dfe48376df5032187fa90c6067
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/flatten_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor flatten(const at::Tensor & self, int64_t start_dim=0, int64_t end_dim=-1);
+TORCH_API at::Tensor flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim);
+TORCH_API at::Tensor flatten(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim);
+TORCH_API at::Tensor flatten(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/flip.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/flip.h
new file mode 100644
index 0000000000000000000000000000000000000000..a7b1a37ae44e9fc0d663d8c644e8ce7bf48ca852
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/flip.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/flip_ops.h>
+
+namespace at {
+
+
+// aten::flip(Tensor self, int[] dims) -> Tensor
+TORCH_API inline at::Tensor flip(const at::Tensor & self, at::IntArrayRef dims) {
+    return at::_ops::flip::call(self, dims);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..dba6169de90c6664a8acec6afe9b29096a12e05a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fractional_max_pool3d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fractional_max_pool3d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fractional_max_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); +}; + +struct TORCH_API fractional_max_pool3d_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fractional_max_pool3d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..07c7cecf7ba94e2528418fd6936effec27ccc8ba --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fractional_max_pool3d_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { + +struct TORCH_API structured_fractional_max_pool3d_out_cpu : public at::meta::structured_fractional_max_pool3d { +void impl(const at::Tensor & self, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW, int64_t outputT, int64_t outputH, int64_t outputW, const at::Tensor & random_samples, int64_t numBatch, int64_t numPlanes, int64_t inputT, int64_t inputH, int64_t inputW, const at::Tensor & output, const at::Tensor & indices); +}; +struct TORCH_API structured_fractional_max_pool3d_out_cuda : public at::meta::structured_fractional_max_pool3d { +void impl(const at::Tensor & self, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW, int64_t outputT, int64_t outputH, int64_t outputW, const at::Tensor & random_samples, int64_t numBatch, int64_t numPlanes, int64_t inputT, int64_t inputH, int64_t inputW, const at::Tensor & output, const at::Tensor & indices); +}; + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/gelu_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/gelu_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3cdc3996fdb7468b985c7cde9f894433e4c7b616 --- /dev/null +++ 
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/gelu_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor gelu(const at::Tensor & self, c10::string_view approximate="none");
+TORCH_API at::Tensor & gelu_out(at::Tensor & out, const at::Tensor & self, c10::string_view approximate="none");
+TORCH_API at::Tensor & gelu_outf(const at::Tensor & self, c10::string_view approximate, at::Tensor & out);
+TORCH_API at::Tensor & gelu_(at::Tensor & self, c10::string_view approximate="none");
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/glu_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/glu_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..aefe9658557bff07f2d23b3d04eb4b93cefe33a5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/glu_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor glu(const at::Tensor & self, int64_t dim=-1);
+TORCH_API at::Tensor & glu_out(at::Tensor & out, const at::Tensor & self, int64_t dim=-1);
+TORCH_API at::Tensor & glu_outf(const at::Tensor & self, int64_t dim, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/grid_sampler_2d_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/grid_sampler_2d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d0a004abcf0d0559eea81053433c3dd147578455
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/grid_sampler_2d_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e20abaad71c819df823ed82aa19b0d55bf11487e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/huber_loss_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0);
+TORCH_API at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0);
+TORCH_API at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isnan_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isnan_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6152f70975c53507a8bd069861d95ce8d3adfc5a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isnan_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor isnan(const at::Tensor & self);
+TORCH_API at::Tensor isnan_sparse(const at::Tensor & self);
+TORCH_API at::Tensor isnan_sparse_csr(const at::Tensor & self);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/kthvalue_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/kthvalue_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9254589777a0841c01e093fde46c7ec4e593e6ec
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/kthvalue_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae380cc18fdaa690dc9b5a0d8cca27e64e8764a7
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_tensorinv_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_tensorinv(const at::Tensor & self, int64_t ind=2);
+TORCH_API at::Tensor & linalg_tensorinv_out(at::Tensor & out, const at::Tensor & self, int64_t ind=2);
+TORCH_API at::Tensor & linalg_tensorinv_outf(const at::Tensor & self, int64_t ind, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_tensorinv_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_tensorinv_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..be95cb4a747a07e836c27f139e1fa07eb56f1120
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_tensorinv_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_tensorinv {
+  using schema = at::Tensor (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_tensorinv")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_tensorinv(Tensor self, int ind=2) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t ind);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind);
+};
+
+struct TORCH_API linalg_tensorinv_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_tensorinv")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t ind, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_vector_norm_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_vector_norm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c46f1037217dc754867bc80a72083644a84d0234
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_vector_norm_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_vector_norm {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, c10::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_vector_norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+};
+
+struct TORCH_API linalg_vector_norm_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::OptionalIntArrayRef, bool, c10::optional<at::ScalarType>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_vector_norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logical_and.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logical_and.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a6d42de0e5bd4ae0b4ec66dc28c85523377df0a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logical_and.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/logical_and_ops.h>
+
+namespace at {
+
+
+// aten::logical_and(Tensor self, Tensor other) -> Tensor
+TORCH_API inline at::Tensor logical_and(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::logical_and::call(self, other);
+}
+
+// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & logical_and_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::logical_and_out::call(self, other, out);
+}
+
+// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & logical_and_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::logical_and_out::call(self, other, out);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logical_not_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logical_not_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e930571b7dc3a059f8834acbaa37da40558408f9
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logical_not_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & logical_not_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & logical_not_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logspace_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logspace_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5a1a08f5d0c4459e9dba74c5106c2a523a88b360
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logspace_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & logspace_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
+TORCH_API at::Tensor & logspace_cuda_out(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/masked_select_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/masked_select_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..aa7e1b90f74ced952851dda55a2805563370c7a0
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/masked_select_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API masked_select_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::masked_select")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & mask, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out);
+};
+
+struct TORCH_API masked_select {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::masked_select")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "masked_select(Tensor self, Tensor mask) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & mask);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..40559ab6569e85787d26f5e972e8da3d781b8211
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor miopen_depthwise_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_rnn_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_rnn_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..072391ccd96591ed41ecefcd5eb1e7db2e445c2d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_rnn_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/miopen_rnn_backward_ops.h>
+
+namespace at {
+
+
+// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
+TORCH_API inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..80277ec58c8869240ffb7eba261ff96f145be8c7
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/mkldnn_max_pool2d_backward_ops.h>
+
+namespace at {
+
+
+// aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+TORCH_API inline at::Tensor mkldnn_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+    return at::_ops::mkldnn_max_pool2d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nanmedian_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nanmedian_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a30f6a14309a750af9e90ceb66ea1cc95c8e90b
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nanmedian_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> nanmedian(const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> nanmedian_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nll_loss2d.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nll_loss2d.h
new file mode 100644
index 0000000000000000000000000000000000000000..da4205e474c0dd75c40351a77bd1a7109c914766
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nll_loss2d.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/nll_loss2d_ops.h>
+
+namespace at {
+
+
+// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & nll_loss2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
+    return at::_ops::nll_loss2d_out::call(self, target, weight, reduction, ignore_index, out);
+}
+
+// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & nll_loss2d_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) {
+    return at::_ops::nll_loss2d_out::call(self, target, weight, reduction, ignore_index, out);
+}
+
+// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
+TORCH_API inline at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
+    return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/one_hot.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/one_hot.h
new file mode 100644
index 0000000000000000000000000000000000000000..98bc89138011cb3b913920e1a410c1339b3ce0ae
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/one_hot.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/one_hot_ops.h>
+
+namespace at {
+
+
+// aten::one_hot(Tensor self, int num_classes=-1) -> Tensor
+TORCH_API inline at::Tensor one_hot(const at::Tensor & self, int64_t num_classes=-1) {
+    return at::_ops::one_hot::call(self, num_classes);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/randint_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/randint_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f280ceeed3ce95e0820b6073032602f5a91d0211
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/randint_ops.h
@@ -0,0 +1,105 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API randint {
+  using schema = at::Tensor (int64_t, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint(int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API randint_generator {
+  using schema = at::Tensor (int64_t, at::IntArrayRef, c10::optional<at::Generator>, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "generator")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API randint_low {
+  using schema = at::Tensor (int64_t, int64_t, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low(int low, int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API randint_low_generator {
+  using schema = at::Tensor (int64_t, int64_t, at::IntArrayRef, c10::optional<at::Generator>, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low_generator")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API randint_out {
+  using schema = at::Tensor & (int64_t, at::IntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(int64_t high, at::IntArrayRef size, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, at::Tensor & out);
+};
+
+struct TORCH_API randint_generator_out {
+  using schema = at::Tensor & (int64_t, at::IntArrayRef, c10::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "generator_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
+};
+
+struct TORCH_API randint_low_out {
+  using schema = at::Tensor & (int64_t, int64_t, at::IntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out);
+};
+
+struct TORCH_API randint_low_generator_out {
+  using schema = at::Tensor & (int64_t, int64_t, at::IntArrayRef, c10::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low_generator_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/random_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/random_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f72192d05f28978113d03069d3a231d6b3c86fe1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/random_native.h
@@ -0,0 +1,31 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor random_functional(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & random_(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & random_meta_(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor random_functional(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & random_(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & random_meta_(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor random_functional(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & random_(at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & random_meta_(at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/record_stream_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/record_stream_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0713f35096e693b817215775b3ac747773164276
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/record_stream_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API void record_stream(at::Tensor & self, at::Stream s);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..047673694e749d9b67a50971e87cae3d967e82cb
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/resize_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/resize_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f7c56f1ce31d9c2a87227f2b9d37670ed265dfb8
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/resize_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API resize_ {
+  using schema = const at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional<at::MemoryFormat>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::resize_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "resize_(Tensor(a!) self, int[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)")
+  static const at::Tensor & call(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format);
+  static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format);
+};
+
+struct TORCH_API resize_out {
+  using schema = const at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional<at::MemoryFormat>, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::resize")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "resize.out(Tensor self, int[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")
+  static const at::Tensor & call(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out);
+  static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out);
+};
+
+struct TORCH_API resize_functional {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional<at::MemoryFormat>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::resize")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "functional")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "resize.functional(Tensor self, int[] size, *, MemoryFormat? memory_format=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/result_type_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/result_type_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f0e91f882b095f8bb3a35ec9e89b1b99f1ab8881
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/result_type_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::ScalarType result_type(const at::Tensor & tensor, const at::Tensor & other);
+TORCH_API at::ScalarType result_type(const at::Tensor & tensor, const at::Scalar & other);
+TORCH_API at::ScalarType result_type(const at::Scalar & scalar, const at::Tensor & tensor);
+TORCH_API at::ScalarType result_type(const at::Scalar & scalar1, const at::Scalar & scalar2);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/row_stack_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/row_stack_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c3aeeed16af08b94d2adba2be62cc7dc371dfaf
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/row_stack_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor row_stack(at::TensorList tensors);
+TORCH_API at::Tensor & row_stack_out(at::Tensor & out, at::TensorList tensors);
+TORCH_API at::Tensor & row_stack_outf(at::TensorList tensors, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rsub.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rsub.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e61c44054411b79aa1e28e6e9218b71bb50314d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rsub.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/rsub_ops.h>
+
+namespace at {
+
+
+// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+TORCH_API inline at::Tensor rsub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
+    return at::_ops::rsub_Tensor::call(self, other, alpha);
+}
+
+// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+TORCH_API inline at::Tensor rsub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
+    return at::_ops::rsub_Scalar::call(self, other, alpha);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rsub_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rsub_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d4b1869672c3d5a60321f426b23293f334fb4a5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rsub_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor rsub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/scatter_add_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/scatter_add_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..59328cbc756e7ecc5072c49732afe74680451ed6
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/scatter_add_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor scatter_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src);
+TORCH_API at::Tensor & scatter_add_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src);
+TORCH_API at::Tensor & scatter_add_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out);
+TORCH_API at::Tensor & scatter_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/silu_meta.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/silu_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..612b20bc630fb3097dd202d63b5d65dbfcb09e0f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/silu_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_silu : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sin_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sin_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3e48f8fc83c012f2faf4689c437deb6b813f2552
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sin_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor sin(const at::Tensor & self);
+TORCH_API at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & sin_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/softshrink_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/softshrink_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..46752d2c17034617d6c46f3767e9a1b47d088d1a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/softshrink_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor softshrink(const at::Tensor & self, const at::Scalar & lambd=0.5);
+TORCH_API at::Tensor & softshrink_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5);
+TORCH_API at::Tensor & softshrink_outf(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_entr_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_entr_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f359a786faf77e8e1605fcae411fb9377ffc54ee
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_entr_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor special_entr(const at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_i0_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_i0_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f6c636f356df60fdae86f3f6e033f6ddd20a2a0
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_i0_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor special_i0(const at::Tensor & self);
+TORCH_API at::Tensor & special_i0_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_i0_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sub_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sub_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5e1fa8e59c137919daa2f70191cc1c540bf0feea
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sub_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor sub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & sub_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/svd_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/svd_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7ded1579c82c630e6e7c4ce60e063f4e32795844
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/svd_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd(const at::Tensor & self, bool some=true, bool compute_uv=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & V, const at::Tensor & self, bool some=true, bool compute_uv=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_outf(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tanh_meta.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tanh_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a01be8a37682b3b42f4b9dc58c0783e82005f40
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tanh_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_tanh : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/transpose_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/transpose_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..e939d12139998dcb0bf944e5e53dbfded9b50ab2
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/transpose_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API transpose_int {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::transpose")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, int64_t dim0, int64_t dim1);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1);
+};
+
+struct TORCH_API transpose_Dimname {
+  using schema = at::Tensor (const at::Tensor &, at::Dimname, at::Dimname);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::transpose")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim0, at::Dimname dim1);
+};
+
+struct TORCH_API transpose_ {
+  using schema = at::Tensor & (at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::transpose_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, int64_t dim0, int64_t dim1);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trapz_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trapz_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..981f7598328f519278ccdb6832f929368796c8c1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trapz_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1);
+TORCH_API at::Tensor trapz(const at::Tensor & y, double dx=1, int64_t dim=-1);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/var_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/var_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..946ba2f2c23511b2f1fc5366cc812dc1f68e8a04
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/var_native.h
@@ -0,0 +1,31 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor var(const at::Tensor & self, bool unbiased=true);
+TORCH_API at::Tensor var(const at::Tensor & self, at::IntArrayRef dim, bool unbiased=true, bool keepdim=false);
+TORCH_API at::Tensor & var_out(const at::Tensor & self, at::IntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out);
+TORCH_API at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim=false);
+TORCH_API at::Tensor & var_out(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out);
+TORCH_API at::Tensor var(const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false);
+TORCH_API at::Tensor & var_out(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out);
+TORCH_API at::Tensor var(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim=false);
+TORCH_API at::Tensor & var_out(const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/xlogy_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/xlogy_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d7ac12b254998e5d772f0fd116055e160080f0fd
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/xlogy_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor xlogy(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & xlogy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & xlogy_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & xlogy_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at