diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_add_batch_dim.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_add_batch_dim.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f0f2df77f74adc16ee317ecceb26e89fc2065ca
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_add_batch_dim.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_add_batch_dim_ops.h>
+
+namespace at {
+
+
+// aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor
+TORCH_API inline at::Tensor _add_batch_dim(const at::Tensor & self, int64_t batch_dim, int64_t level) {
+    return at::_ops::_add_batch_dim::call(self, batch_dim, level);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_forward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_forward.h
new file mode 100644
index 0000000000000000000000000000000000000000..88bfba3b56d7237a95f98d6d197124ba40ce47fa
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_forward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cdist_forward_ops.h>
+
+namespace at {
+
+
+// aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
+TORCH_API inline at::Tensor _cdist_forward(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
+    return at::_ops::_cdist_forward::call(x1, x2, p, compute_mode);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e5b7adda516c7310d6f015079440b063d40dd3f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_empty_per_channel_affine_quantized.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_empty_per_channel_affine_quantized_ops.h>
+
+namespace at {
+
+
+// aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
+TORCH_API inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+
+// aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
+TORCH_API inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_fft_c2r_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_fft_c2r_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..15b8726206b09f1a82dec36a0d128ad5b8d6452f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_fft_c2r_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor _fft_c2r_mkl(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size);
+TORCH_API at::Tensor & _fft_c2r_mkl_out(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out);
+TORCH_API at::Tensor _fft_c2r_cufft(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size);
+TORCH_API at::Tensor & _fft_c2r_cufft_out(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_asin_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_asin_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..515dad79d9c1bb7b50553905051798624befee33
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_asin_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_asin {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_asin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_asin(Tensor[] tensors) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList tensors);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+struct TORCH_API _foreach_asin_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_asin_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_asin_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_asin_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_asin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+struct TORCH_API _foreach_asin_functional {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_asin")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "functional")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_asin.functional(Tensor[] self) -> Tensor[] self_out")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log2.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log2.h
new file mode 100644
index 0000000000000000000000000000000000000000..a6db9f187e1b5f3b504af1adc038e2885b00d700
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log2.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_log2_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_log2(Tensor[] tensors) -> Tensor[]
+TORCH_API inline ::std::vector<at::Tensor> _foreach_log2(at::TensorList tensors) {
+    return at::_ops::_foreach_log2::call(tensors);
+}
+
+// aten::_foreach_log2_(Tensor(a!)[] self) -> ()
+TORCH_API inline void _foreach_log2_(at::TensorList self) {
+    return at::_ops::_foreach_log2_::call(self);
+}
+
+// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_log2_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_log2_out::call(self, out);
+}
+
+// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_log2_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_log2_out::call(self, out);
+}
+
+// aten::_foreach_log2.functional(Tensor[] self) -> Tensor[] self_out
+TORCH_API inline ::std::vector<at::Tensor> _foreach_log2_functional(at::TensorList self) {
+    return at::_ops::_foreach_log2_functional::call(self);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sin.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sin.h
new file mode 100644
index 0000000000000000000000000000000000000000..ede8a908a1ad607b5d0664866867c2e8a2d40541
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sin.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_sin_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_sin(Tensor[] tensors) -> Tensor[]
+TORCH_API inline ::std::vector<at::Tensor> _foreach_sin(at::TensorList tensors) {
+    return at::_ops::_foreach_sin::call(tensors);
+}
+
+// aten::_foreach_sin_(Tensor(a!)[] self) -> ()
+TORCH_API inline void _foreach_sin_(at::TensorList self) {
+    return at::_ops::_foreach_sin_::call(self);
+}
+
+// aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_sin_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_sin_out::call(self, out);
+}
+
+// aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_sin_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_sin_out::call(self, out);
+}
+
+// aten::_foreach_sin.functional(Tensor[] self) -> Tensor[] self_out
+TORCH_API inline ::std::vector<at::Tensor> _foreach_sin_functional(at::TensorList self) {
+    return at::_ops::_foreach_sin_functional::call(self);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sqrt_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sqrt_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9b3490f836edf63eda16bce33b0dbb269584a371
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sqrt_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_sqrt(at::TensorList tensors);
+TORCH_API void _foreach_sqrt_(at::TensorList self);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_fused_dropout.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_fused_dropout.h
new file mode 100644
index 0000000000000000000000000000000000000000..61e29831c6a0382f69abfaf4f42a9a08c6bf26f7
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_fused_dropout.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_fused_dropout_ops.h>
+
+namespace at {
+
+
+// aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)
+TORCH_API inline ::std::tuple<at::Tensor,at::Tensor> _fused_dropout(const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) {
+    return at::_ops::_fused_dropout::call(self, p, generator);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_linalg_check_errors.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_linalg_check_errors.h
new file mode 100644
index 0000000000000000000000000000000000000000..16f316c8fec324abaaf26f56ecb87b6015eee949
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_linalg_check_errors.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_linalg_check_errors_ops.h>
+
+namespace at {
+
+
+// aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()
+TORCH_API inline void _linalg_check_errors(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
+    return at::_ops::_linalg_check_errors::call(info, api_name, is_matrix);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_masked_softmax_backward_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_masked_softmax_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2ce01c401096a81e9158ffdd48a6424fdf3c287a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_masked_softmax_backward_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _masked_softmax_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sample_dirichlet_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sample_dirichlet_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..63e75e62de8f6a356aeaa1876a0de82da7a0c1c3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sample_dirichlet_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _sample_dirichlet(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_optional_floatlist_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_optional_floatlist_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2433411da053e76d8351ce47dedd6d46bebe8786
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_optional_floatlist_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _test_optional_floatlist(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c2991f43e7d8b3ce6c764330d811131416f83bb5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _test_warn_in_autograd {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_warn_in_autograd")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_warn_in_autograd(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_weight_norm_interface_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_weight_norm_interface_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..eeec12acd1cafa9c6582c529b1e85ba1796552f7
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_weight_norm_interface_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_weight_norm_interface_backward_ops.h>
+
+namespace at {
+
+
+// aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
+TORCH_API inline ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
+    return at::_ops::_weight_norm_interface_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..c844231d30fe314fd68a4abd667acfcfcd3bee64
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/adaptive_max_pool3d_backward_ops.h>
+
+namespace at {
+
+
+// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+TORCH_API inline at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
+    return at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output, self, indices, grad_input);
+}
+
+// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+TORCH_API inline at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) {
+    return at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output, self, indices, grad_input);
+}
+
+// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
+TORCH_API inline at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
+    return at::_ops::adaptive_max_pool3d_backward::call(grad_output, self, indices);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/add_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/add_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4ade264c4eb637ff26e8166f2aaa172f8bbcb558
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/add_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor add(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & add_(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/and_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/and_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0eee8491cf475d2abe798a5c091cbb5a180b6ac1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/and_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API __and___Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__and__")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__and__.Scalar(Tensor self, Scalar other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API __and___Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__and__")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__and__.Tensor(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API __iand___Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__iand__")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API __iand___Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::__iand__")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/atan_meta.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/atan_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..c883c3add1e2f373249aebe77cfe497ac36fc9c0
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/atan_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_atan : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/atleast_1d_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/atleast_1d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ec584ce60f0ff0938de5c63119911c862253ef4d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/atleast_1d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor atleast_1d(const at::Tensor & self);
+TORCH_API ::std::vector<at::Tensor> atleast_1d(at::TensorList tensors);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool2d_backward_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool2d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..18b8eaeb23298fb9a5dd7df73eeabab8d252ecaa
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool2d_backward_native.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/avg_pool2d_backward_meta.h>
+
+namespace at {
+namespace native {
+
+struct TORCH_API structured_avg_pool2d_backward_out_cpu : public at::meta::structured_avg_pool2d_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_avg_pool2d_backward_out_cuda : public at::meta::structured_avg_pool2d_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, const at::Tensor & grad_input);
+};
+TORCH_API at::Tensor mkldnn_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+TORCH_API at::Tensor & mkldnn_avg_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/block_diag.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/block_diag.h
new file mode 100644
index 0000000000000000000000000000000000000000..20edce065c4ab4cda2f748120eaffe99e66f69af
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/block_diag.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/block_diag_ops.h>
+
+namespace at {
+
+
+// aten::block_diag(Tensor[] tensors) -> Tensor
+TORCH_API inline at::Tensor block_diag(at::TensorList tensors) {
+    return at::_ops::block_diag::call(tensors);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/chain_matmul_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/chain_matmul_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..379d1b7a5afe03d1bff0118f71418db7c0d00e89
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/chain_matmul_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor chain_matmul(at::TensorList matrices);
+TORCH_API at::Tensor & chain_matmul_out(at::Tensor & out, at::TensorList matrices);
+TORCH_API at::Tensor & chain_matmul_outf(at::TensorList matrices, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conj_physical.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conj_physical.h
new file mode 100644
index 0000000000000000000000000000000000000000..341228a17224fd47b74540d4ca3e532ac1324758
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conj_physical.h
@@ -0,0 +1,45 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/conj_physical_ops.h>
+
+namespace at {
+
+
+// aten::conj_physical(Tensor self) -> Tensor
+TORCH_API inline at::Tensor conj_physical(const at::Tensor & self) {
+    return at::_ops::conj_physical::call(self);
+}
+
+// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::conj_physical_out::call(self, out);
+}
+
+// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::conj_physical_out::call(self, out);
+}
+
+// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)
+TORCH_API inline at::Tensor & conj_physical_(at::Tensor & self) {
+    return at::_ops::conj_physical_::call(self);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conv_transpose3d_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conv_transpose3d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..08be2f2ceb0e394dd97e96bbdfcf2be610ddd631
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conv_transpose3d_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API conv_transpose3d_input {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conv_transpose3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cov_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cov_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..41e84938de4d6271996e9de03e237cd8395aec2b
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cov_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor cov(const at::Tensor & self, int64_t correction=1, const c10::optional<at::Tensor> & fweights={}, const c10::optional<at::Tensor> & aweights={});
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cudnn_convolution_add_relu_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cudnn_convolution_add_relu_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..006ba2c43db24e6a3126a83192b59e89ff2ea2ae
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cudnn_convolution_add_relu_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API cudnn_convolution_add_relu {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Scalar> &, const c10::optional<at::Tensor> &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_convolution_add_relu")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cumprod_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cumprod_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..62999808ba678247495a5b3f7babac0bf7db5469
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cumprod_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor cumprod(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & cumprod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & cumprod_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor & cumprod_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/digamma_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/digamma_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..47b5301daa3eb7f49c3640f1b2e7f9ef7a95cc61
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/digamma_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/digamma_meta.h>
+
+namespace at {
+namespace native {
+
+struct TORCH_API structured_digamma_out : public at::meta::structured_digamma {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/dsplit_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/dsplit_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..02b4b6cabe12327f746c6b489dda2470fbb2cbcb
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/dsplit_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> dsplit(const at::Tensor & self, int64_t sections);
+TORCH_API ::std::vector<at::Tensor> dsplit(const at::Tensor & self, at::IntArrayRef indices);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/erfinv_meta.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/erfinv_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..764e4120340797b9a9ab49409fa273c4796cb26c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/erfinv_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_erfinv : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_fftfreq_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_fftfreq_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..08234ba60f53ef053ce157cc8c7505a2bee85dfc
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_fftfreq_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor fft_fftfreq(int64_t n, double d=1.0, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & fft_fftfreq_out(int64_t n, double d, at::Tensor & out);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_ihfft2_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_ihfft2_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..61133ba01d6c0d2477277d42ac8a61af094853cb
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_ihfft2_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor fft_ihfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt); +TORCH_API const at::Tensor & fft_ihfft2_out(const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt); +TORCH_API const at::Tensor & fft_ihfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, const at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/floor_divide_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/floor_divide_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..babb8013a3f7ecbd9059e2fed69738bfee681134 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/floor_divide_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor floor_divide(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Scalar & other); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/frobenius_norm_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/frobenius_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c5bd666d114078c95015408748d7bb26383d5792 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/frobenius_norm_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor frobenius_norm(const at::Tensor & self); +TORCH_API at::Tensor frobenius_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & frobenius_norm_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/group_norm_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/group_norm_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cc86fcea7422f130b2fe9a101b017823f30e659c --- /dev/null +++ 
b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/group_norm_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor group_norm(const at::Tensor & input, int64_t num_groups, const c10::optional & weight={}, const c10::optional & bias={}, double eps=1e-05, bool cudnn_enabled=true); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/heaviside_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/heaviside_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..89834dee1d41873ba12d2cb18b059c006bf75b4e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/heaviside_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor heaviside(const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & values); +TORCH_API at::Tensor & heaviside_outf(const at::Tensor & self, const at::Tensor & values, at::Tensor & out); +TORCH_API at::Tensor & heaviside_(at::Tensor & self, const at::Tensor & values); + +} // namespace cpu +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hstack_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hstack_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6e7d4bccbbbb214b5a16b7d9bcddd7ac8aa7ff65 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hstack_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor hstack(at::TensorList tensors); +TORCH_API at::Tensor & hstack_out(at::Tensor & out, at::TensorList tensors); +TORCH_API at::Tensor & hstack_outf(at::TensorList tensors, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_floating_point_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_floating_point_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7985002a7a2a17264da404c596cdec3d4ec93a71 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_floating_point_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_floating_point { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_floating_point") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_floating_point(Tensor self) -> bool") + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_neg.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_neg.h new file mode 100644 index 0000000000000000000000000000000000000000..81cdb32c8e6ef2e99ef6a75a2565d0d2b02710c2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_neg.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::is_neg(Tensor self) -> bool +TORCH_API inline bool __dispatch_is_neg(const at::Tensor & self) { + return at::_ops::is_neg::call(self); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5fb7386712a7fe8a0f421f01f2909832ad6481a1 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the 
operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple kthvalue_outf(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/log10_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/log10_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e24969edc2897682b90c788a55a47214232a4e5a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/log10_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { + +TORCH_API at::Tensor log10(const at::Tensor & self); +struct TORCH_API structured_log10_out : public at::meta::structured_log10 { +void impl(const at::Tensor & self, const at::Tensor & out); +}; + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lt.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lt.h new file mode 100644 index 0000000000000000000000000000000000000000..e114edf875344643b2c3c9da05a2462a82690d5a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lt.h @@ -0,0 +1,55 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::lt_Scalar_out::call(self, other, out); +} + +// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & lt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::lt_Scalar_out::call(self, other, out); +} + +// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor +TORCH_API inline at::Tensor lt(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::lt_Scalar::call(self, other); +} + +// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & lt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lt_Tensor_out::call(self, other, out); +} + +// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+TORCH_API inline at::Tensor & lt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::lt_Tensor_out::call(self, other, out); +} + +// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor +TORCH_API inline at::Tensor lt(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lt_Tensor::call(self, other); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/max_pool1d_with_indices.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/max_pool1d_with_indices.h new file mode 100644 index 0000000000000000000000000000000000000000..5b646e2eeeda2975144378e6c0872721af3e3cff --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/max_pool1d_with_indices.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) +TORCH_API inline ::std::tuple max_pool1d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool1d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/maximum_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/maximum_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e8888437aec7d28a6a7e1095540005ab586533bd --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/maximum_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
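[Editor's note] The lt.h wrappers above illustrate the naming convention used throughout these generated headers: *_out takes the destination tensor first (C++ style), while *_outf takes it last, matching the schema's argument order; both forward to the same operator. A minimal sketch, assuming a libtorch build:

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::randn({3});
  at::Tensor b = at::randn({3});
  at::Tensor out = at::empty({3}, at::kBool);  // comparison ops produce bool tensors
  at::lt_out(out, a, b);                       // destination-first variant
  at::lt_outf(a, b, out);                      // schema-order variant; same operator
  at::Tensor c = at::lt(a, b);                 // functional variant, allocates its result
  return 0;
}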
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mean.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mean.h new file mode 100644 index 0000000000000000000000000000000000000000..6b7ad64347eadc948a1473b42ab2e2fc689fc53b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mean.h @@ -0,0 +1,60 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor +TORCH_API inline at::Tensor mean(const at::Tensor & self, c10::optional dtype=c10::nullopt) { + return at::_ops::mean::call(self, dtype); +} + +// aten::mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +TORCH_API inline at::Tensor mean(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::mean_dim::call(self, dim, keepdim, dtype); +} + +// aten::mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::mean_out::call(self, dim, keepdim, dtype, out); +} + +// aten::mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & mean_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::mean_out::call(self, dim, keepdim, dtype, out); +} + +// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +TORCH_API inline at::Tensor mean(const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::mean_names_dim::call(self, dim, keepdim, dtype); +} + +// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::mean_names_out::call(self, dim, keepdim, dtype, out); +} + +// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
+TORCH_API inline at::Tensor & mean_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::mean_names_out::call(self, dim, keepdim, dtype, out); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/median_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/median_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..46ce1f86e09a60ab5cd16c3e6f7c0e8e15032078 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/median_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple median(const at::Tensor & self, int64_t dim, bool keepdim=false); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mm.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mm.h new file mode 100644 index 0000000000000000000000000000000000000000..b2fd9790e7c3fd4e453e11be27cd00c9925954b4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mm.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mm(Tensor self, Tensor mat2) -> Tensor +TORCH_API inline at::Tensor mm(const at::Tensor & self, const at::Tensor & mat2) { + return at::_ops::mm::call(self, mat2); +} + +// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & mm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) { + return at::_ops::mm_out::call(self, mat2, out); +} + +// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) 
+TORCH_API inline at::Tensor & mm_outf(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) { + return at::_ops::mm_out::call(self, mat2, out); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/multinomial_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/multinomial_native.h new file mode 100644 index 0000000000000000000000000000000000000000..538547366bdc0e9f857339ad70c63bdecf6fda92 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/multinomial_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor multinomial(const at::Tensor & self, int64_t num_samples, bool replacement=false, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & multinomial_out(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional generator, at::Tensor & out); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_group_norm_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_group_norm_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6382bd9149ae61937fb0eaac3b884d1a138df4e0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_group_norm_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
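[Editor's note] As a concrete instance of the Function.h wrappers just shown, here is a hedged sketch of at::mm and its out variant; the shapes are assumptions for illustration:

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::randn({2, 3});
  at::Tensor b = at::randn({3, 4});
  at::Tensor c = at::mm(a, b);       // (2, 4) result, freshly allocated
  at::Tensor out = at::empty({2, 4});
  at::mm_out(out, a, b);             // writes into out and returns it
  return 0;
}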
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple native_group_norm(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps); + +} // namespace cpu +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_layer_norm_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_layer_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2bb1074aab0c37d5da8bbc3f2ef9328dc114e186 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_layer_norm_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API ::std::tuple math_native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps); +TORCH_API ::std::tuple layer_norm_cpu(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps); +TORCH_API ::std::tuple layer_norm_cuda(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/new_full_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/new_full_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..07b99c9f9bc346d9b15ddb6ce6ce11a427704e11 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/new_full_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
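[Editor's note] The at::native declarations above (math_native_layer_norm, layer_norm_cpu, layer_norm_cuda) are backend kernels; user code normally goes through the public operator, which returns (output, mean, rstd). A minimal sketch, assuming a libtorch build and C++17 for the structured binding:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({4, 8});
  // Normalize over the last dimension; {} passes c10::nullopt for weight and bias.
  auto [y, mean, rstd] = at::native_layer_norm(x, /*normalized_shape=*/{8},
                                               /*weight=*/{}, /*bias=*/{},
                                               /*eps=*/1e-5);
  return 0;
}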
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor new_full(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}); +TORCH_API at::Tensor new_full(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nll_loss_forward_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nll_loss_forward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b0a7712ed1c2a8e9e7e5c5e12ae7ac4cd89e5c42 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nll_loss_forward_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { + +struct TORCH_API structured_nll_loss_forward_out_cpu : public at::meta::structured_nll_loss_forward { +void impl(const at::Tensor & self, const at::Tensor & target, at::OptionalTensorRef weight, int64_t reduction, int64_t ignore_index, const at::Tensor & output, const at::Tensor & total_weight); +}; +struct TORCH_API structured_nll_loss_forward_out_cuda : public at::meta::structured_nll_loss_forward { +void impl(const at::Tensor & self, const at::Tensor & target, at::OptionalTensorRef weight, int64_t reduction, int64_t ignore_index, const at::Tensor & output, const at::Tensor & total_weight); +}; + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/pad_sequence_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/pad_sequence_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9906a613c8f40c0b52e882b6967263c39b4415f5 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/pad_sequence_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
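[Editor's note] new_full, declared above, is also exposed as a Tensor method, which is the common way to call it: the result inherits dtype and device from self unless overridden through the options arguments. A sketch under the same assumptions as the earlier examples:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::zeros({2, 2}, at::kDouble);
  // Same dtype/device as x; only size and fill value are explicit.
  at::Tensor y = x.new_full({3, 3}, /*fill_value=*/7);
  return y.scalar_type() == at::kDouble ? 0 : 1;
}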
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor pad_sequence(at::TensorList sequences, bool batch_first=false, double padding_value=0.0); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/permute_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/permute_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ee1b863dd039c66397d389328d123d093b63596c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/permute_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API permute { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::permute") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "permute(Tensor(a) self, int[] dims) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef dims); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/pinverse.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/pinverse.h new file mode 100644 index 0000000000000000000000000000000000000000..728794eebccbd99415e01047ebfc78f359233e1c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/pinverse.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor +TORCH_API inline at::Tensor pinverse(const at::Tensor & self, double rcond=1e-15) { + return at::_ops::pinverse::call(self, rcond); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/poisson_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/poisson_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..56903bf9b78ebed59c6c9b8bfda5a24943bc9771 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/poisson_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
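[Editor's note] The Tensor(a) annotation in the permute schema above marks the result as aliasing self, i.e. permute returns a view rather than a copy. A minimal sketch of what that implies, with illustrative shapes:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::arange(6).reshape({2, 3});
  at::Tensor y = x.permute({1, 0});  // view: shares storage with x
  // Mutating the view is visible through the original tensor.
  y.fill_(0);
  return x.sum().item<int64_t>() == 0 ? 0 : 1;
}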
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API poisson { + using schema = at::Tensor (const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::poisson") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "poisson(Tensor self, Generator? generator=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional generator); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/q_per_channel_scales_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/q_per_channel_scales_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d49efed9a075b1bb7f2e52012367c3473857544f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/q_per_channel_scales_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor q_per_channel_scales(const at::Tensor & self); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/repeat_interleave_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/repeat_interleave_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..78d50b8af5fd155558e75e294817dedb177d82a2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/repeat_interleave_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
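[Editor's note] poisson, declared above, draws one sample per element, with self holding the non-negative floating-point rates; the generator argument may be left at its default. A hedged sketch:

#include <ATen/ATen.h>

int main() {
  at::Tensor rates = at::rand({4}) * 5.0;  // non-negative float rates
  at::Tensor draws = at::poisson(rates);   // generator defaults to c10::nullopt
  return 0;
}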
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor repeat_interleave(const at::Tensor & repeats, c10::optional output_size=c10::nullopt); + +} // namespace cpu +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/scatter_reduce.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/scatter_reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..36b49282e490254401ddf0d9c1893a1e71cb740d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/scatter_reduce.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor +TORCH_API inline at::Tensor scatter_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) { + return at::_ops::scatter_reduce_two::call(self, dim, index, src, reduce, include_self); +} + +// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & scatter_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) { + return at::_ops::scatter_reduce_two_out::call(self, dim, index, src, reduce, include_self, out); +} + +// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & scatter_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) { + return at::_ops::scatter_reduce_two_out::call(self, dim, index, src, reduce, include_self, out); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/searchsorted_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/searchsorted_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..032021e8183668c873d78d1bd877445d1dfbcf36 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/searchsorted_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
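[Editor's note] scatter_reduce.two, shown above, reduces src values into self at the positions named by index; reduce selects the reduction ("sum", "prod", "mean", "amax", "amin") and include_self (default true, per the wrapper above) controls whether self's original values participate. A minimal sketch with assumed toy data:

#include <ATen/ATen.h>

int main() {
  at::Tensor self  = at::zeros({3});
  at::Tensor index = at::tensor({0, 1, 0}, at::kLong);  // where each src value lands
  at::Tensor src   = at::ones({3});
  // src[0] and src[2] both accumulate into position 0, so the result is [2, 1, 0].
  at::Tensor out = at::scatter_reduce(self, /*dim=*/0, index, src, "sum");
  return 0;
}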
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional side=c10::nullopt, const c10::optional & sorter={}); +TORCH_API at::Tensor & searchsorted_out(at::Tensor & out, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional side=c10::nullopt, const c10::optional & sorter={}); +TORCH_API at::Tensor & searchsorted_outf(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, at::Tensor & out); +TORCH_API at::Tensor searchsorted(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional side=c10::nullopt, const c10::optional & sorter={}); + +} // namespace cuda +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sinh_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sinh_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0e8de2d784788c36b4d62e1e4d741e1a54100618 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sinh_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor sinh(const at::Tensor & self); +TORCH_API at::Tensor & sinh_(at::Tensor & self); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_scatter_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_scatter_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..955fca8fd0ebd50422e6d8b7b236a54022be016f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_scatter_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
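[Editor's note] searchsorted, declared above, returns for each value in self the insertion index into sorted_sequence that keeps it sorted; right and side choose which side of runs of equal values to report. A sketch with assumed data:

#include <ATen/ATen.h>

int main() {
  at::Tensor boundaries = at::arange(10, at::kFloat);     // 0, 1, ..., 9
  at::Tensor values = at::tensor({3.5f, 7.0f});
  at::Tensor idx = at::searchsorted(boundaries, values);  // int64 indices: [4, 7]
  return 0;
}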
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor slice_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_i1_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_i1_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7504ffc7a966306c574a6fd629595cacfe293b7b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_i1_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_i1(const at::Tensor & self); +TORCH_API at::Tensor & special_i1_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_i1_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sqrt_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sqrt_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7be1ba026f21fcd83b33a73e364ec3692408636b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sqrt_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API sqrt { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sqrt") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sqrt(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API sqrt_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sqrt_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sqrt_(Tensor(a!) 
self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API sqrt_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sqrt") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/std_mean_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/std_mean_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f06f52d57da4d0dde9a9d2fc007a1417ff558266 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/std_mean_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API ::std::tuple std_mean(const at::Tensor & self, bool unbiased=true); +TORCH_API ::std::tuple std_mean(const at::Tensor & self, at::IntArrayRef dim, bool unbiased=true, bool keepdim=false); +TORCH_API ::std::tuple std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional correction, bool keepdim=false); +TORCH_API ::std::tuple std_mean(const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false); +TORCH_API ::std::tuple std_mean(const at::Tensor & self, at::DimnameList dim, c10::optional correction, bool keepdim=false); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sum_to_size_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sum_to_size_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6172cf5b4535e4175374014bd39652c0087fc3c0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sum_to_size_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor sum_to_size(const at::Tensor & self, at::IntArrayRef size); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/svd_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/svd_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f92a4b7fd10eb9a9f85f9ba0f55dddb0831fe436 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/svd_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward 
declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API svd_U { + using schema = ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::svd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "U") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)") + static ::std::tuple call(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V); +}; + +struct TORCH_API svd { + using schema = ::std::tuple (const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::svd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)") + static ::std::tuple call(const at::Tensor & self, bool some, bool compute_uv); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tan_meta.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tan_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..f483fe3e497db769961e8ec03c98dc9269df08eb --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/tan_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_tan : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/unbind_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/unbind_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..771af8c02bbbf3a0da35a7f8128c1630af6c12e0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/unbind_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types 
needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::vector unbind(const at::Tensor & self, int64_t dim=0); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e49198b54052b9444da7fa7d879688dc151d72d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bilinear2d_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor upsample_bilinear2d_backward(const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional> scale_factors); +TORCH_API at::Tensor upsample_bilinear2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..6901af6adf963d953c0eab9ba481c66847f2c2af --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::upsample_nearest3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor +TORCH_API inline at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional> scale_factors) { + return at::_ops::upsample_nearest3d_backward_vec::call(grad_output, output_size, input_size, scale_factors); +} + +// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+TORCH_API inline at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input); +} + +// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +TORCH_API inline at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input); +} + +// aten::upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor +TORCH_API inline at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_trilinear3d.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_trilinear3d.h new file mode 100644 index 0000000000000000000000000000000000000000..7352997ffb3b38f1bac2af7ea65e6f57d78b68da --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_trilinear3d.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::upsample_trilinear3d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor +TORCH_API inline at::Tensor upsample_trilinear3d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional> scale_factors) { + return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors); +} + +// aten::upsample_trilinear3d.out(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & upsample_trilinear3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out); +} + +// aten::upsample_trilinear3d.out(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
+TORCH_API inline at::Tensor & upsample_trilinear3d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::upsample_trilinear3d_out::call(self, output_size, align_corners, scales_d, scales_h, scales_w, out); +} + +// aten::upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor +TORCH_API inline at::Tensor upsample_trilinear3d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_copy_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c48925d33041acf9e21230f8f3760b3b74cc4a14 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_copy_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API view_copy { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_copy(Tensor self, int[] size) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size); +}; + +struct TORCH_API view_copy_dtype { + using schema = at::Tensor (const at::Tensor &, at::ScalarType); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dtype") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::ScalarType dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype); +}; + +struct TORCH_API view_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_copy.out(Tensor self, int[] size, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out); +}; + +struct TORCH_API view_copy_dtype_out { + using schema = at::Tensor & (const at::Tensor &, at::ScalarType, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::view_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dtype_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::ScalarType dtype, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..807f031a2edb63b47d50e6351ad8437f003470e2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor view(const at::Tensor & self, at::IntArrayRef size); + +} // namespace cpu +} // namespace at