diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cbb87c847b7aa65ac2510ec88c1a8684aee3cb2f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _adaptive_avg_pool3d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_adaptive_avg_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..87c31883179e6c11172d3ed8be408e00041541c3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor _embedding_bag_per_sample_weights_backward_cpu(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1);
+TORCH_API at::Tensor _embedding_bag_per_sample_weights_backward_cuda(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_div_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_div_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5e49ab2fb0d33e7c8c495bbd9db37f787a42c081
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_div_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList tensors, const at::Scalar & scalar);
+TORCH_API void _foreach_div_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList tensors1, at::TensorList tensors2);
+TORCH_API void _foreach_div_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList tensors, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_div_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_expm1_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_expm1_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..708d50664b9aaf832a56498dc4419e6d97c56a51
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_expm1_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_expm1_functional(at::TensorList self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_gather_sparse_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_gather_sparse_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce82f488919395f4a42b971fe593500f8d63a399
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_gather_sparse_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_gather_sparse_backward_ops.h>
+
+namespace at {
+
+
+// aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor
+TORCH_API inline at::Tensor _gather_sparse_backward(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
+    return at::_ops::_gather_sparse_backward::call(self, dim, index, grad);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_linalg_inv_out_helper_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_linalg_inv_out_helper_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a56234b688294f4d1feede007561882a2b8b8c8
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_linalg_inv_out_helper_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & _linalg_inv_out_helper_(at::Tensor & self, at::Tensor & infos_lu, at::Tensor & infos_getri);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_linalg_svd.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_linalg_svd.h
new file mode 100644
index 0000000000000000000000000000000000000000..a8df2d68dfb2fc0c1d85b8fc631c77c2ca196ae3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_linalg_svd.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_linalg_svd_ops.h>
+
+namespace at {
+
+
+// aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor Vh)
+TORCH_API inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd(const at::Tensor & A, bool full_matrices=false, bool compute_uv=true) {
+    return at::_ops::_linalg_svd::call(A, full_matrices, compute_uv);
+}
+
+// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
+TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=false, bool compute_uv=true) {
+    return at::_ops::_linalg_svd_U::call(A, full_matrices, compute_uv, U, S, Vh);
+}
+
+// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
+TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_outf(const at::Tensor & A, bool full_matrices, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
+    return at::_ops::_linalg_svd_U::call(A, full_matrices, compute_uv, U, S, Vh);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_optional_floatlist_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_optional_floatlist_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..343ad88fc858c9f898427a47768061e9c7cf6e34
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_optional_floatlist_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _test_optional_floatlist {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<at::ArrayRef<double>>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_optional_floatlist")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor")
+  static at::Tensor call(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_string_default_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_string_default_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..df7c2724257217c81820c1c553ca43760a1bac9b
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_string_default_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _test_string_default(const at::Tensor & dummy, c10::string_view a="\"'\\", c10::string_view b="\"'\\");
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_to_cpu_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_to_cpu_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c283d3bf38b7e25a0a89b3a445daf77a6179b1d8
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_to_cpu_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _to_cpu {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_to_cpu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_to_cpu(Tensor[] tensors) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList tensors);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_unpack_dual_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_unpack_dual_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd35e468215e6bfadbcb2398e866c0ed0f1f85e5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_unpack_dual_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _unpack_dual {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unpack_dual")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & dual, int64_t level);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dual, int64_t level);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b15547fa77f18f61f3d93ccf241373366a11838d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _upsample_nearest_exact3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors);
+TORCH_API at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_use_cudnn_ctc_loss_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_use_cudnn_ctc_loss_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..98a21fdc8fa4da08b57a93e30f84bdb1451ea366
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_use_cudnn_ctc_loss_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API bool _use_cudnn_ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bff21c5ae189fb0cbb7eac98864e3715b81e55da
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_avg_pool2d_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addcmul_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addcmul_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1b3c8e5b72945c9ed2e4f4fd10155f81dec7e82
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addcmul_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+TORCH_API at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/all_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/all_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..45becf365cc5cc402cee679fbb871f72bc18d199
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/all_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor all(const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API at::Tensor & all_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API at::Tensor & all_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out);
+TORCH_API at::Tensor all(const at::Tensor & self);
+TORCH_API at::Tensor & all_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & all_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/allclose_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/allclose_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4ad0780d37166bb98bcfd891d2e4d260b58f1ee9
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/allclose_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API bool allclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/aminmax.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/aminmax.h
new file mode 100644
index 0000000000000000000000000000000000000000..770f71865fb961f6ff4d10accf3dae3a298e86cb
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/aminmax.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/aminmax_ops.h>
+
+namespace at {
+
+
+// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
+TORCH_API inline ::std::tuple<at::Tensor,at::Tensor> aminmax(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
+    return at::_ops::aminmax::call(self, dim, keepdim);
+}
+
+// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
+TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &> aminmax_out(at::Tensor & min, at::Tensor & max, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
+    return at::_ops::aminmax_out::call(self, dim, keepdim, min, max);
+}
+
+// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
+TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &> aminmax_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
+    return at::_ops::aminmax_out::call(self, dim, keepdim, min, max);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/any_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/any_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c015933d90bcbcf2a85e7fd3094c7253df99338
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/any_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor any(const at::Tensor & self, int64_t dim, bool keepdim=false);
+TORCH_API at::Tensor any(const at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool2d_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool2d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..7986a233e891d74f16c245eaec4087d1f4bf57a8
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool2d_backward.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/avg_pool2d_backward_ops.h>
+
+namespace at {
+
+
+// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
+TORCH_API inline at::Tensor & avg_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
+    return at::_ops::avg_pool2d_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
+}
+
+// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
+TORCH_API inline at::Tensor & avg_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) {
+    return at::_ops::avg_pool2d_backward_grad_input::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input);
+}
+
+// aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor
+TORCH_API inline at::Tensor avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
+    return at::_ops::avg_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bitwise_not.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bitwise_not.h
new file mode 100644
index 0000000000000000000000000000000000000000..41d430036c16feebc70e7bd0288ff790edb9dc09
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bitwise_not.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/bitwise_not_ops.h>
+
+namespace at {
+
+
+// aten::bitwise_not(Tensor self) -> Tensor
+TORCH_API inline at::Tensor bitwise_not(const at::Tensor & self) {
+    return at::_ops::bitwise_not::call(self);
+}
+
+// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & bitwise_not_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::bitwise_not_out::call(self, out);
+}
+
+// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & bitwise_not_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::bitwise_not_out::call(self, out);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/channel_shuffle_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/channel_shuffle_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc98831922607b90f0b237b6676117071a0b5684
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/channel_shuffle_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor channel_shuffle(const at::Tensor & self, int64_t groups);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cholesky_inverse_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cholesky_inverse_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..db657f7ea744f7ca01a0e0aea35c9c20326063dd
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cholesky_inverse_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor cholesky_inverse(const at::Tensor & self, bool upper=false);
+TORCH_API at::Tensor & cholesky_inverse_out(at::Tensor & out, const at::Tensor & self, bool upper=false);
+TORCH_API at::Tensor & cholesky_inverse_outf(const at::Tensor & self, bool upper, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/choose_qparams_optimized_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/choose_qparams_optimized_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..949813c0a00a17468b9b9e85b0a589f9a35ba03e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/choose_qparams_optimized_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/clamp_min_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/clamp_min_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d60c7ec2b7da3b79674a36f8314be7637b85fa95
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/clamp_min_cpu_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor clamp_min(const at::Tensor & self, const at::Scalar & min);
+TORCH_API at::Tensor & clamp_min_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min);
+TORCH_API at::Tensor & clamp_min_outf(const at::Tensor & self, const at::Scalar & min, at::Tensor & out);
+TORCH_API at::Tensor & clamp_min_(at::Tensor & self, const at::Scalar & min);
+TORCH_API at::Tensor clamp_min(const at::Tensor & self, const at::Tensor & min);
+TORCH_API at::Tensor & clamp_min_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & min);
+TORCH_API at::Tensor & clamp_min_outf(const at::Tensor & self, const at::Tensor & min, at::Tensor & out);
+TORCH_API at::Tensor & clamp_min_(at::Tensor & self, const at::Tensor & min);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/complex_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/complex_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3dcf615a42e94eefcc2e849806fbaa9f6a092674
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/complex_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & complex_out(at::Tensor & out, const at::Tensor & real, const at::Tensor & imag);
+TORCH_API at::Tensor & complex_outf(const at::Tensor & real, const at::Tensor & imag, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conv3d.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conv3d.h
new file mode 100644
index 0000000000000000000000000000000000000000..a8ef95c513e50da40e590f1423fd22b08e9f4ebe
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conv3d.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/conv3d_ops.h>
+
+namespace at {
+
+
+// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor
+TORCH_API inline at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) {
+    return at::_ops::conv3d::call(input, weight, bias, stride, padding, dilation, groups);
+}
+
+// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor
+TORCH_API inline at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) {
+    return at::_ops::conv3d_padding::call(input, weight, bias, stride, padding, dilation, groups);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cos_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cos_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a9bcc9848862cb95b62a8bfe3b59cd41d089ab9
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cos_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor cos(const at::Tensor & self);
+TORCH_API at::Tensor & cos_(at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cummaxmin_backward_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cummaxmin_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8bd02b3094cb7f0d2b5f3a25593ddff718c7bf54
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cummaxmin_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor cummaxmin_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/feature_alpha_dropout_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/feature_alpha_dropout_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2faa068a650b27826964455a5494e62486a8aca0
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/feature_alpha_dropout_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API feature_alpha_dropout {
+  using schema = at::Tensor (const at::Tensor &, double, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::feature_alpha_dropout")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, double p, bool train);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train);
+};
+
+struct TORCH_API feature_alpha_dropout_ {
+  using schema = at::Tensor & (at::Tensor &, double, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::feature_alpha_dropout_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, double p, bool train);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/from_file.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/from_file.h
new file mode 100644
index 0000000000000000000000000000000000000000..d306f25a141246a3576fdc439ca3351704522c2d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/from_file.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/from_file_ops.h>
+
+namespace at {
+
+
+// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+TORCH_API inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared=c10::nullopt, c10::optional<int64_t> size=0, at::TensorOptions options={}) {
+    return at::_ops::from_file::call(filename, shared, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+
+// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+TORCH_API inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::from_file::call(filename, shared, size, dtype, layout, device, pin_memory);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5615ab89f3eaf2e83c745e344afeb74c8bd4ffd4
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ff02f0756280681b89748b14df667fd501ff374
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/grid_sampler_3d_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/huber_loss.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/huber_loss.h
new file mode 100644
index 0000000000000000000000000000000000000000..23f7601c554a2365b4f07a23caa1fba42f659a7a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/huber_loss.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/huber_loss_ops.h>
+
+namespace at {
+
+
+// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) {
+    return at::_ops::huber_loss_out::call(self, target, reduction, delta, out);
+}
+
+// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) {
+    return at::_ops::huber_loss_out::call(self, target, reduction, delta, out);
+}
+
+// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
+TORCH_API inline at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) {
+    return at::_ops::huber_loss::call(self, target, reduction, delta);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/igamma_meta.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/igamma_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b8b3afc2f1f5d95c8dc5e7814c1a8b5e50ac9f7
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/igamma_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_igamma : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/index_reduce_meta.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/index_reduce_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae24635a7bc3f11c1688bf2e39f0f7d96e9d4016
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/index_reduce_meta.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_index_reduce : public at::impl::MetaBase {
+
+  template <bool DIM = false>
+  struct TORCH_API precompute_out {
+
+    precompute_out<true> set_dim(int64_t value) {
+      static_assert(DIM == false, "dim already set");
+      precompute_out<true> ret;
+ret.dim = value;
+return ret;
+    }
+
+    int64_t dim;
+  };
+  using meta_return_ty = precompute_out <true>;
+  meta_return_ty meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self);
+};
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_signed_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_signed_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9a7b4b971e028ae74fbd9d27886021a63f2a1393
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_signed_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API bool is_signed(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isclose.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isclose.h
new file mode 100644
index 0000000000000000000000000000000000000000..9cd21a36fc181e67b5096de0d9635709b9c52f5a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isclose.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/isclose_ops.h>
+
+namespace at {
+
+
+// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
+TORCH_API inline at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) {
+    return at::_ops::isclose::call(self, other, rtol, atol, equal_nan);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/item_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/item_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6648c6622ef814ee942cb359f17150185f8eac08
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/item_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Scalar item(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3e584ba74bba0d7f11b7de0e3b04fd2c6da0ce7f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lcm_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0082471d2ec99184c21f874f7bfcff3e0d6c9867
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0082471d2ec99184c21f874f7bfcff3e0d6c9867
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lcm_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor lcm(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lcm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & lcm_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & lcm_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..20aa6e98c30c69ec0a4a49c77f72bf736683803b
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01);
+TORCH_API at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01);
+TORCH_API at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out);
+TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_solve_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_solve_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8eb6c956e95402df4a746537705f7368146e30ff
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_solve_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_solve {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_solve")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_solve(Tensor input, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & other);
+};
+
+struct TORCH_API linalg_solve_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_solve")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_solve.out(Tensor input, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & input, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & other, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/log_sigmoid_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/log_sigmoid_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d2bcf2e7d4f06aaa62cd7bf40cc110ebfa4299f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/log_sigmoid_backward.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/log_sigmoid_backward_ops.h>
+
+namespace at {
+
+
+// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
+TORCH_API inline at::Tensor & log_sigmoid_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
+    return at::_ops::log_sigmoid_backward_grad_input::call(grad_output, self, buffer, grad_input);
+}
+
+// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
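The Operator.h-style headers (*_ops.h) carry one struct per overload, bundling the schema string with call() (a full dispatch from scratch) and redispatch() (resume dispatch from a given DispatchKeySet); the public at::linalg_solve is a thin wrapper over call(). A sketch of that equivalence, assuming libtorch (a random 3x3 system, which is nonsingular with probability 1):

#include <ATen/ATen.h>
#include <ATen/ops/linalg_solve_ops.h>

int main() {
  at::Tensor A = at::randn({3, 3});
  at::Tensor b = at::randn({3});
  at::Tensor x1 = at::linalg_solve(A, b);             // public API
  at::Tensor x2 = at::_ops::linalg_solve::call(A, b); // what it expands to
  return at::allclose(x1, x2) ? 0 : 1;
}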
+TORCH_API inline at::Tensor & log_sigmoid_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) {
+    return at::_ops::log_sigmoid_backward_grad_input::call(grad_output, self, buffer, grad_input);
+}
+
+// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
+TORCH_API inline at::Tensor log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
+    return at::_ops::log_sigmoid_backward::call(grad_output, self, buffer);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logaddexp2_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logaddexp2_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4db12093d1ce93dc036c4af6083863881116204
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/logaddexp2_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor logaddexp2(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautograd
+} // namespace at
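logaddexp2 computes log2(2^a + 2^b) in a numerically stable way; its CompositeExplicitAutograd registration means a single kernel serves every backend while the autograd formula is supplied explicitly rather than derived by decomposition. A sketch of why the stable form matters (values illustrative):

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::full({1}, 1000.0);
  at::Tensor b = at::full({1}, 1000.0);
  // Naive log2(2^1000 + 2^1000) overflows in float32; the stable form gives 1001.
  std::cout << at::logaddexp2(a, b).item<double>() << std::endl;
}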
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lu_unpack_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lu_unpack_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..33a48f21e220762c6041677d52a654dffef51ad9
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lu_unpack_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_outf(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/max_unpool2d_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/max_unpool2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f151667cfd74daedef71604f308198ae86d20d70
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/max_unpool2d_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor max_unpooling2d_forward_cpu(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size);
+TORCH_API at::Tensor & max_unpooling2d_forward_out_cpu(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor max_unpooling2d_forward_cuda(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size);
+TORCH_API at::Tensor & max_unpooling2d_forward_out_cuda(const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out);
+
+} // namespace native
+} // namespace at
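lu_unpack expands a packed LU factorization (as produced by at::linalg_lu_factor) into explicit P, L, U factors; the two bool flags let callers skip materializing either part. A sketch, assuming libtorch:

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor A = at::randn({4, 4});
  at::Tensor LU, pivots;
  std::tie(LU, pivots) = at::linalg_lu_factor(A);
  at::Tensor P, L, U;
  std::tie(P, L, U) = at::lu_unpack(LU, pivots);
  // P @ L @ U reconstructs A up to floating-point error
  return at::allclose(at::matmul(P, at::matmul(L, U)), A, 1e-4, 1e-5) ? 0 : 1;
}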
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/min_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/min_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3385923bc75f293ea2dfeb5cf0f6a317f3d4419b
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/min_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> min_outf(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices);
+TORCH_API at::Tensor min(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & min_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & min_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_convolution.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_convolution.h
new file mode 100644
index 0000000000000000000000000000000000000000..59d66c7513d686db4c98ee794fdf2bd954c363a4
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_convolution.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/miopen_convolution_ops.h>
+
+namespace at {
+
+
+// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor
+TORCH_API inline at::Tensor miopen_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+    return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
+}
+
+}
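min shows why a dispatch header can hold several unrelated overloads of one name: a named-dimension reduction returning a (values, indices) pair, and an element-wise binary form. A sketch of both, assuming libtorch:

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor a = at::randn({5});
  at::Tensor b = at::randn({5});
  at::Tensor m = at::min(a, b);  // element-wise binary min
  at::Tensor values, indices;
  std::tie(values, indices) = at::min(a.unsqueeze(0), /*dim=*/1, /*keepdim=*/false);
  return 0;
}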
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_rnn_backward_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_rnn_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..fc7feca774b86134df72eca77939bd243a8a7bf1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/miopen_rnn_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API miopen_rnn_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional<at::Tensor> &, const at::Tensor &, ::std::array<bool,4>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_rnn_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_linear.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_linear.h
new file mode 100644
index 0000000000000000000000000000000000000000..592c38f53faf79af11bbdf6bca4187be70bb66cf
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_linear.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/mkldnn_linear_ops.h>
+
+namespace at {
+
+
+// aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
+TORCH_API inline at::Tensor mkldnn_linear(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}) {
+    return at::_ops::mkldnn_linear::call(self, weight, bias);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..41a110d0d5446d7761f090c535056fad898e5fc7
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API mkldnn_linear_backward_weights {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_linear_backward_weights")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mse_loss_backward_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mse_loss_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8daf060b1e86936d775c5e51b2bf297593ed3dda
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mse_loss_backward_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction);
+TORCH_API at::Tensor & mse_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input);
+
+} // namespace native
+} // namespace at
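The *_native.h headers declare the kernel entry points that the dispatcher registrations bind to. For mse_loss_backward the math is simple enough to verify by hand: with mean reduction, d/dx mean((x - t)^2) = 2 (x - t) / N. A sketch (the reduction constants live in ATen/core/Reduction.h):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({8});
  at::Tensor t = at::randn({8});
  // grad_output is a 0-dim tensor of 1 for a scalar (mean-reduced) loss
  at::Tensor grad = at::mse_loss_backward(at::ones({}), x, t, at::Reduction::Mean);
  at::Tensor manual = 2.0 * (x - t) / static_cast<double>(x.numel());
  return at::allclose(grad, manual) ? 0 : 1;
}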
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/reflection_pad3d_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/reflection_pad3d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1115432afc0eb9760bf869870c2ca59cd8b89681
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/reflection_pad3d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor reflection_pad3d(const at::Tensor & self, at::IntArrayRef padding);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sgn.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sgn.h
new file mode 100644
index 0000000000000000000000000000000000000000..da65c68a35b99e42dc04bd80a818bbb86eb81709
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sgn.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/sgn_ops.h>
+
+namespace at {
+
+
+// aten::sgn(Tensor self) -> Tensor
+TORCH_API inline at::Tensor sgn(const at::Tensor & self) {
+    return at::_ops::sgn::call(self);
+}
+
+// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::sgn_out::call(self, out);
+}
+
+// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::sgn_out::call(self, out);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sgn_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sgn_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..62e336bbddd77a87be3eb84d84730cf9a10b9a9a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sgn_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor sgn(const at::Tensor & self);
+TORCH_API at::Tensor & sgn_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & sgn_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & sgn_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
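sgn generalizes sign to complex tensors: for nonzero z it returns z/|z|, and on real inputs it matches sign. The cuda dispatch header above declares the same four variants for the CUDA build. A sketch on real and complex inputs, assuming libtorch:

#include <ATen/ATen.h>

int main() {
  at::Tensor r = at::arange(-2, 3).to(at::kFloat);  // [-2,-1,0,1,2]
  at::Tensor s = at::sgn(r);                        // [-1,-1,0,1,1], same as sign
  at::Tensor z = at::randn({4}, at::kComplexFloat);
  at::Tensor u = at::sgn(z);                        // z / |z|: unit modulus (for nonzero z)
  return at::allclose(u.abs(), at::ones({4}), 1e-4, 1e-5) ? 0 : 1;
}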
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/silu_backward_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/silu_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd254bf37ebfe3b285cf199aa223434d9360615a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/silu_backward_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor silu_backward(const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor & silu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor & silu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_backward_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c5eb87e7a7abeb41257eb5c2b63eb9229a0f2f88
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor slice_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_copy_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd745c241ed27ff1d339ec019eee4c731cc028a4
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API slice_copy_Tensor {
+  using schema = at::Tensor (const at::Tensor &, int64_t, c10::optional<int64_t>, c10::optional<int64_t>, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::slice_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "slice_copy.Tensor(Tensor self, int dim=0, int? start=None, int? end=None, int step=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step);
+};
+
+struct TORCH_API slice_copy_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, c10::optional<int64_t>, c10::optional<int64_t>, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::slice_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "slice_copy.Tensor_out(Tensor self, int dim=0, int? start=None, int? end=None, int step=1, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out);
+};
+
+}} // namespace at::_ops
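slice_copy is the functionalization counterpart of slice: the schema is identical (note the int? start/end becoming c10::optional<int64_t> above), but it returns an independent copy instead of a view, which is what functional graph transforms need. A sketch of the difference, assuming libtorch:

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::arange(10);
  at::Tensor v = at::slice(t, /*dim=*/0, /*start=*/2, /*end=*/6);       // view
  at::Tensor c = at::slice_copy(t, /*dim=*/0, /*start=*/2, /*end=*/6);  // copy
  t.add_(100);
  // The view reflects the mutation; the copy does not.
  return (v[0].item<int64_t>() == 102 && c[0].item<int64_t>() == 2) ? 0 : 1;
}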
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/softshrink_backward_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/softshrink_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..72baaa7c7470f189b3dbf0fc731771d31991da1e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/softshrink_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API softshrink_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softshrink_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input);
+};
+
+struct TORCH_API softshrink_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::softshrink_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_log_softmax_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_log_softmax_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b64eac2e58168dcc25814fa7eb617b4ffcc007f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_log_softmax_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor special_log_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tanh.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tanh.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a3287da50856fa27f59a0ddb14e534a94e01bb3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tanh.h
@@ -0,0 +1,45 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/tanh_ops.h>
+
+namespace at {
+
+
+// aten::tanh(Tensor self) -> Tensor
+TORCH_API inline at::Tensor tanh(const at::Tensor & self) {
+    return at::_ops::tanh::call(self);
+}
+
+// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
+TORCH_API inline at::Tensor & tanh_(at::Tensor & self) {
+    return at::_ops::tanh_::call(self);
+}
+
+// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & tanh_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::tanh_out::call(self, out);
+}
+
+// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & tanh_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::tanh_out::call(self, out);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/thnn_conv2d.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/thnn_conv2d.h
new file mode 100644
index 0000000000000000000000000000000000000000..1125de7ad7f3659b648121134659c27d7090b6f1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/thnn_conv2d.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/thnn_conv2d_ops.h>
+
+namespace at {
+
+
+// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & thnn_conv2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
+    return at::_ops::thnn_conv2d_out::call(self, weight, kernel_size, bias, stride, padding, out);
+}
+
+// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & thnn_conv2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) {
+    return at::_ops::thnn_conv2d_out::call(self, weight, kernel_size, bias, stride, padding, out);
+}
+
+// aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
+TORCH_API inline at::Tensor thnn_conv2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) {
+    return at::_ops::thnn_conv2d::call(self, weight, kernel_size, bias, stride, padding);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/to_sparse_csr_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/to_sparse_csr_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac8f3bf4d223510332e9d198bea5b9c8ab27b3f9
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/to_sparse_csr_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor to_sparse_csr(const at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/topk.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/topk.h
new file mode 100644
index 0000000000000000000000000000000000000000..4ea81f1399371f906c4858a5b75ae4577d342787
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/topk.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/topk_ops.h>
+
+namespace at {
+
+
+// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &> topk_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) {
+    return at::_ops::topk_values::call(self, k, dim, largest, sorted, values, indices);
+}
+
+// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+TORCH_API inline ::std::tuple<at::Tensor &,at::Tensor &> topk_outf(const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::topk_values::call(self, k, dim, largest, sorted, values, indices);
+}
+
+// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
+TORCH_API inline ::std::tuple<at::Tensor,at::Tensor> topk(const at::Tensor & self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) {
+    return at::_ops::topk::call(self, k, dim, largest, sorted);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/transpose.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/transpose.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c86ea5fd5285323dc9c0e7896dc3b3aea626cdd
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/transpose.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/transpose_ops.h>
+
+namespace at {
+
+
+// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
+TORCH_API inline at::Tensor transpose(const at::Tensor & self, int64_t dim0, int64_t dim1) {
+    return at::_ops::transpose_int::call(self, dim0, dim1);
+}
+
+// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
+TORCH_API inline at::Tensor transpose(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
+    return at::_ops::transpose_Dimname::call(self, dim0, dim1);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trapezoid_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trapezoid_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..8bda2029a8c884696b9d0468a7f56c8d1d455014
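topk returns a (values, indices) pair, and its out variant takes the two destination tensors first, mirroring the (a!)/(b!) annotations in the schema; transpose.int, declared just above, returns a view with two dims swapped. A sketch of both, assuming libtorch:

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor scores = at::randn({10});
  at::Tensor values, indices;
  std::tie(values, indices) = at::topk(scores, /*k=*/3);  // largest 3, sorted
  at::Tensor m = at::randn({2, 5});
  at::Tensor mt = at::transpose(m, 0, 1);                 // shape [5, 2], shares storage
  return (mt.size(0) == 5) ? 0 : 1;
}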
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trapezoid_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API trapezoid_x {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trapezoid")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor")
+  static at::Tensor call(const at::Tensor & y, const at::Tensor & x, int64_t dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim);
+};
+
+struct TORCH_API trapezoid_dx {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::trapezoid")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dx")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor")
+  static at::Tensor call(const at::Tensor & y, const at::Scalar & dx, int64_t dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Scalar & dx, int64_t dim);
+};
+
+}} // namespace at::_ops
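The two trapezoid overloads mirror torch.trapezoid: the .x form integrates y against sample points x, the .dx form against a uniform spacing. Integrating y = x^2 on [0, 1] should approach 1/3. A sketch, assuming libtorch:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor x = at::linspace(0, 1, 1001);
  at::Tensor y = x * x;
  std::cout << at::trapezoid(y, x).item<double>() << std::endl;      // ~0.333333
  std::cout << at::trapezoid(y, 0.001).item<double>() << std::endl;  // same, uniform dx
}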
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tril_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tril_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..73546783d411935e2414458e84da3d473d05c509
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tril_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor tril(const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & tril_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & tril_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out);
+TORCH_API at::Tensor & tril_(at::Tensor & self, int64_t diagonal=0);
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tril_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tril_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..023df5bf5a1d97bcd0d8be2b76193b0d9a5f7d5f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tril_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/tril_meta.h>
+
+namespace at {
+namespace native {
+
+struct TORCH_API structured_tril_cpu : public at::meta::structured_tril {
+void impl(const at::Tensor & self, int64_t diagonal, const at::Tensor & out);
+};
+struct TORCH_API structured_tril_cuda : public at::meta::structured_tril {
+void impl(const at::Tensor & self, int64_t diagonal, const at::Tensor & out);
+};
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/type_as.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/type_as.h
new file mode 100644
index 0000000000000000000000000000000000000000..5416ef0a9ea26ee9d8ae8d08cf4da0282409833c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/type_as.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/type_as_ops.h>
+
+namespace at {
+
+
+
+}
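tril is a structured kernel: the shared meta function handles shape checking and output allocation, and tril_native.h above declares only the per-device impl hooks that structured_tril_cpu/cuda fill in (type_as, by contrast, is method-only, so its Function.h is empty). From the caller's side it is just:

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::ones({4, 4});
  at::Tensor lower = at::tril(a);                     // main diagonal and below
  at::Tensor strict = at::tril(a, /*diagonal=*/-1);   // strictly below the diagonal
  at::Tensor out = at::empty({4, 4});
  at::tril_out(out, a, /*diagonal=*/0);               // out variant
  return 0;
}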
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..52ae5477a06c5a127dad9024667bbb13bf840103
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bilinear2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bilinear2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
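These upsample headers declare the CUDA backward above and, in the meta dispatch that follows, a storage-less entry used for shape inference. The forward public API, for reference (a sketch; sizes illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor img = at::randn({1, 3, 8, 8});  // NCHW
  at::Tensor up = at::upsample_bilinear2d(img, {16, 16}, /*align_corners=*/false);
  return (up.size(2) == 16 && up.size(3) == 16) ? 0 : 1;
}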
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bilinear2d_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bilinear2d_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b743383ccb317bac032415b2c18f3321b6051a90
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bilinear2d_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor upsample_bilinear2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bilinear2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bilinear2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..febae135e85bb19c25021b51589d024afc5fc9ff
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_nearest1d_backward_meta.h>
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors);
+struct TORCH_API structured_upsample_nearest1d_backward_out_cpu : public at::meta::structured_upsample_nearest1d_backward {
+void impl(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_upsample_nearest1d_backward_out_cuda : public at::meta::structured_upsample_nearest1d_backward {
+void impl(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, const at::Tensor & grad_input);
+};
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_as_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_as_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..717968da9a0c961fecde28001df29518bff7201e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_as_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor view_as(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace native
+} // namespace at
diff --git a/outputs_difftumor_atlas_test/BDMAP_00007902/ct.nii.gz b/outputs_difftumor_atlas_test/BDMAP_00007902/ct.nii.gz
new file mode 100644
index 0000000000000000000000000000000000000000..0ee38b37cc5d46d0f133ea2c9a24ccd6b9ab0ba1
--- /dev/null
+++ b/outputs_difftumor_atlas_test/BDMAP_00007902/ct.nii.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60fe73a1bd3562958e06d151330853948982c5fab092aadc94a13343a3a30d15
+size 145969283