diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb14e5b7deb4df6cc93d24772e3cb20579b2e808
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _autocast_to_reduced_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_backward_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a7a70bcd5dbe9c858c5368e59fef0251d9b3f05
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_coalesced_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_coalesced_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf650bcd08674d3520bf633e3615bfac662da4c0
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_coalesced_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _coalesced_ {
+  using schema = at::Tensor & (at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_coalesced_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, bool coalesced);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool coalesced);
+};
+
+struct TORCH_API _coalesced_out {
+  using schema = at::Tensor & (const at::Tensor &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_coalesced")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, bool coalesced, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced, at::Tensor & out);
+};
+
+struct TORCH_API _coalesced_functional {
+  using schema = at::Tensor (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_coalesced")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "functional")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_coalesced.functional(Tensor self, bool coalesced) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, bool coalesced);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_embedding_bag_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_embedding_bag_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f4f6d0427e13202193e9172e5ba9e6c1e182a7a1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_embedding_bag_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_cpu(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_cuda(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_ceil.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_ceil.h
new file mode 100644
index 0000000000000000000000000000000000000000..6ec44b1c10185b102b44d13d24ccbd9dc92e872c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_ceil.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_ceil_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_ceil(Tensor[] tensors) -> Tensor[]
+TORCH_API inline ::std::vector<at::Tensor> _foreach_ceil(at::TensorList tensors) {
+    return at::_ops::_foreach_ceil::call(tensors);
+}
+
+// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
+TORCH_API inline void _foreach_ceil_(at::TensorList self) {
+    return at::_ops::_foreach_ceil_::call(self);
+}
+
+// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_ceil_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_ceil_out::call(self, out);
+}
+
+// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_ceil_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_ceil_out::call(self, out);
+}
+
+// aten::_foreach_ceil.functional(Tensor[] self) -> Tensor[] self_out
+TORCH_API inline ::std::vector<at::Tensor> _foreach_ceil_functional(at::TensorList self) {
+    return at::_ops::_foreach_ceil_functional::call(self);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_ceil_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_ceil_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0429d87eadd8b37f63fc1987e95046e6f86ac556
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_ceil_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_ceil_functional(at::TensorList self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_expm1.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_expm1.h
new file mode 100644
index 0000000000000000000000000000000000000000..79a81e3e332e25387119897bd01271a54cd29f76
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_expm1.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_expm1_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_expm1(Tensor[] tensors) -> Tensor[]
+TORCH_API inline ::std::vector<at::Tensor> _foreach_expm1(at::TensorList tensors) {
+    return at::_ops::_foreach_expm1::call(tensors);
+}
+
+// aten::_foreach_expm1_(Tensor(a!)[] self) -> ()
+TORCH_API inline void _foreach_expm1_(at::TensorList self) {
+    return at::_ops::_foreach_expm1_::call(self);
+}
+
+// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_expm1_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_expm1_out::call(self, out);
+}
+
+// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_expm1_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_expm1_out::call(self, out);
+}
+
+// aten::_foreach_expm1.functional(Tensor[] self) -> Tensor[] self_out
+TORCH_API inline ::std::vector<at::Tensor> _foreach_expm1_functional(at::TensorList self) {
+    return at::_ops::_foreach_expm1_functional::call(self);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_lgamma.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_lgamma.h
new file mode 100644
index 0000000000000000000000000000000000000000..a2f81be0d288c6e326fda03ccd607310824cfd49
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_lgamma.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_lgamma_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_lgamma(Tensor[] tensors) -> Tensor[]
+TORCH_API inline ::std::vector<at::Tensor> _foreach_lgamma(at::TensorList tensors) {
+    return at::_ops::_foreach_lgamma::call(tensors);
+}
+
+// aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()
+TORCH_API inline void _foreach_lgamma_(at::TensorList self) {
+    return at::_ops::_foreach_lgamma_::call(self);
+}
+
+// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_lgamma_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_lgamma_out::call(self, out);
+}
+
+// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_lgamma_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_lgamma_out::call(self, out);
+}
+
+// aten::_foreach_lgamma.functional(Tensor[] self) -> Tensor[] self_out
+TORCH_API inline ::std::vector<at::Tensor> _foreach_lgamma_functional(at::TensorList self) {
+    return at::_ops::_foreach_lgamma_functional::call(self);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log2_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log2_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..df4a2d30713dc5d8ecdf325f6f82ea3946d7d7e7
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log2_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_log2_functional(at::TensorList self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d76982da8e10ef9cc5aed61a47cf739cd9e4feac
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_log(at::TensorList tensors);
+TORCH_API void _foreach_log_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sigmoid.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sigmoid.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf8701a6241bf4304035ab7bd457f4303bb744d2
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sigmoid.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_sigmoid_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_sigmoid(Tensor[] tensors) -> Tensor[]
+TORCH_API inline ::std::vector<at::Tensor> _foreach_sigmoid(at::TensorList tensors) {
+    return at::_ops::_foreach_sigmoid::call(tensors);
+}
+
+// aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()
+TORCH_API inline void _foreach_sigmoid_(at::TensorList self) {
+    return at::_ops::_foreach_sigmoid_::call(self);
+}
+
+// aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_sigmoid_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_sigmoid_out::call(self, out);
+}
+
+// aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+TORCH_API inline void _foreach_sigmoid_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_sigmoid_out::call(self, out);
+}
+
+// aten::_foreach_sigmoid.functional(Tensor[] self) -> Tensor[] self_out
+TORCH_API inline ::std::vector<at::Tensor> _foreach_sigmoid_functional(at::TensorList self) {
+    return at::_ops::_foreach_sigmoid_functional::call(self);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c9d5a3ea1acedf6f859bba2848b580051b202f71
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_csc_tensor_unsafe_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_sum_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_sum_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..8ba784e302523aff5e065bece14836c6aa1badbf
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sparse_sum_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor _sparse_sum(const at::Tensor & self);
+TORCH_API at::Tensor _sparse_sum(const at::Tensor & self, at::ScalarType dtype);
+TORCH_API at::Tensor _sparse_sum(const at::Tensor & self, at::IntArrayRef dim);
+TORCH_API at::Tensor _sparse_sum(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2464ca09e1416b25cd3beec54c4533c7d3451d5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+TORCH_API at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+TORCH_API at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addr_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addr_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a7a60d99edf9ccafcaa9a1cb61a2f52afc0f1f1a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/addr_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor addr(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addr_outf(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/affine_grid_generator_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/affine_grid_generator_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f41f767239274ab299ae444dd6539994f72fdde8
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/affine_grid_generator_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API affine_grid_generator {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::affine_grid_generator")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor")
+  static at::Tensor call(const at::Tensor & theta, at::IntArrayRef size, bool align_corners);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, at::IntArrayRef size, bool align_corners);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/alias_copy_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/alias_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..fd28e7c3e071822d009b91dec6c27fdfe51bfa18
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/alias_copy_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor alias_copy(const at::Tensor & self);
+TORCH_API at::Tensor & alias_copy_out(const at::Tensor & self, at::Tensor & out);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/all_meta.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/all_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4f556d919b4a91874752f740a8c24f76faab2d3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/all_meta.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_all_dim : public at::impl::MetaBase {
+
+  template <bool DIM = false>
+  struct TORCH_API precompute_out {
+
+    precompute_out<true> set_dim(int64_t value) {
+      static_assert(DIM == false, "dim already set");
+      precompute_out<true> ret;
+      ret.dim = value;
+      return ret;
+    }
+
+    int64_t dim;
+  };
+  using meta_return_ty = precompute_out<true>;
+  meta_return_ty meta(const at::Tensor & self, int64_t dim, bool keepdim);
+};
+struct TORCH_API structured_all : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/atleast_2d_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/atleast_2d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3dbd8a0612bea8fd38fedbf360d5ee31d1d80c69
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/atleast_2d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor atleast_2d(const at::Tensor & self);
+TORCH_API ::std::vector<at::Tensor> atleast_2d(at::TensorList tensors);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool3d_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool3d_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..21c95b436243363d1bf648eeac59b895f878c7f6
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool3d_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt);
+TORCH_API at::Tensor & avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt);
+TORCH_API at::Tensor & avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bernoulli_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bernoulli_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..92d6e4ca7e72606f2b0c32deed4531c30671ff0a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bernoulli_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor bernoulli(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor bernoulli_functional(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/block_diag_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/block_diag_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..734b2ae4fbcd9e1f7c85960fdc896674d74e39cd
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/block_diag_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor block_diag(at::TensorList tensors);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/can_cast_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/can_cast_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9bae7cf7f363bc59068f9e5be61997c86b2cb04b
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/can_cast_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API bool can_cast(at::ScalarType from, at::ScalarType to);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/col2im_backward_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/col2im_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..70c651be57fd81196e6b7a78da455e813c8480c3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/col2im_backward_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor col2im_backward(const at::Tensor & grad_output, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & col2im_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+TORCH_API at::Tensor & col2im_backward_outf(const at::Tensor & grad_output, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conj_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conj_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bd47f05636b9c62ec6ea6e8bee6d14f016919629
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/conj_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor conj(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/convolution_overrideable_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/convolution_overrideable_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dc2f0966d1cdedc3771df7f9e944fe2ba6bad928
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/convolution_overrideable_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor convolution_overrideable(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cos_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cos_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c5599bf67d909f3587e7e8d7821747e41d1317c9
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cos_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/cos_meta.h>
+
+namespace at {
+namespace native {
+
+struct TORCH_API structured_cos_out : public at::meta::structured_cos {
+  void impl(const at::Tensor & self, const at::Tensor & out);
+};
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/count_nonzero_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/count_nonzero_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f8e8ad842708748e9055ed45fb5334ab04aa011
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/count_nonzero_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor count_nonzero(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0210e1bd0318ab772ce2ffb7047c0cef2ab6111c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/dsplit.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/dsplit.h
new file mode 100644
index 0000000000000000000000000000000000000000..13c0ef536351f05dc50b904e209a073efcf1414f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/dsplit.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/dsplit_ops.h>
+
+namespace at {
+
+
+// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
+TORCH_API inline ::std::vector<at::Tensor> dsplit(const at::Tensor & self, int64_t sections) {
+    return at::_ops::dsplit_int::call(self, sections);
+}
+
+// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
+TORCH_API inline ::std::vector<at::Tensor> dsplit(const at::Tensor & self, at::IntArrayRef indices) {
+    return at::_ops::dsplit_array::call(self, indices);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/elu_backward_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/elu_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..49b7d676bbed0c2afed2e14fd88c2e0407ba4d34
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/elu_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor elu_backward(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/exponential_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/exponential_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f83c1130d8b8b0ecdbaf7cfa07683673d216bf8f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/exponential_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API exponential_ {
+  using schema = at::Tensor & (at::Tensor &, double, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::exponential_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, double lambd, c10::optional<at::Generator> generator);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double lambd, c10::optional<at::Generator> generator);
+};
+
+struct TORCH_API exponential_out {
+  using schema = at::Tensor & (const at::Tensor &, double, c10::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::exponential")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, c10::optional<at::Generator> generator, at::Tensor & out);
+};
+
+struct TORCH_API exponential_functional {
+  using schema = at::Tensor (const at::Tensor &, double, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::exponential")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "functional")
generator=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, double lambd, c10::optional generator); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, c10::optional generator); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/floor_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/floor_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9b5a7a81f6ff35e83040c8a006e13cfb61b236f3 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/floor_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor floor(const at::Tensor & self); +TORCH_API at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & floor_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fmax_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c42aa69f89b6fa837378d18924b66ae09f0a541d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fmax_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fmax { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmax(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API fmax_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fmax.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7e5bb8049724285cc8faf8b1143b840d41ee9d09 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hsplit_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::vector hsplit(const at::Tensor & self, int64_t sections); +TORCH_API ::std::vector hsplit(const at::Tensor & self, at::IntArrayRef indices); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/igamma_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/igamma_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7f72225a9e774c1e60c2e35d5268cf658999245d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/igamma_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { + +struct TORCH_API structured_igamma_out : public at::meta::structured_igamma { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/index_add_meta.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/index_add_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..1c850f68d1656f952cb57efd9d0aa75b83c37f17 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/index_add_meta.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_index_add : public at::impl::MetaBase { + + template + struct TORCH_API precompute_out { + + precompute_out set_dim(int64_t value) { + static_assert(DIM == false, "dim already set"); + precompute_out ret; +ret.dim = value; 
+      return ret;
+    }
+
+    int64_t dim;
+  };
+  using meta_return_ty = precompute_out<true>;
+  meta_return_ty meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_vulkan_available.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_vulkan_available.h
new file mode 100644
index 0000000000000000000000000000000000000000..6df5b7927413b2d77451541107c136f5d6acd04e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_vulkan_available.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/is_vulkan_available_ops.h>
+
+namespace at {
+
+
+// aten::is_vulkan_available() -> bool
+TORCH_API inline bool is_vulkan_available() {
+    return at::_ops::is_vulkan_available::call();
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ee95465e0ff7d2100d0c4ee50262e48127785e7c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor isneginf(const at::Tensor & self);
+TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4e7e3a6ae918f437a65a7efa32ebca8ad052dd9
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor isreal(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3adc307190db636f1019c168d0c65b244780feca --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..66d775d333ea2e5c02d9a87db79d5c288c979fcc --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
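A hedged sketch of how the leaky_relu_backward declarations above are normally reached: user code calls the device-generic at:: entry point and the dispatcher selects the backend kernel (the at::cuda:: functions above for CUDA tensors). leaky_demo is an illustrative name:

    #include <ATen/ATen.h>

    void leaky_demo(const at::Tensor& x) {
      at::Tensor y = at::leaky_relu(x);   // negative_slope defaults to 0.01
      at::Tensor g = at::ones_like(y);
      at::Tensor gx = at::leaky_relu_backward(
          g, x, /*negative_slope=*/0.01, /*self_is_result=*/false);
      (void)gx;
    }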
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lstm_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lstm_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a9a642218aaff628ab37261d9493ddd607d36e28 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/lstm_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); +TORCH_API ::std::tuple lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/matmul.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/matmul.h new file mode 100644 index 0000000000000000000000000000000000000000..d2f40bc73ef41a22afe115d5167cbf10d8d3ce10 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/matmul.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::matmul(Tensor self, Tensor other) -> Tensor +TORCH_API inline at::Tensor matmul(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::matmul::call(self, other); +} + +// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & matmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::matmul_out::call(self, other, out); +} + +// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+TORCH_API inline at::Tensor & matmul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::matmul_out::call(self, other, out); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3e44a7d95d7916c651babd5a72bfffe570616ff2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API ::std::tuple mkldnn_linear_backward_weights(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mul_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mul_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d782ab7eb92a28508672316dfe4485a83af2be80 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mul_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor mul(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & mul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & mul_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & mul_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mvlgamma_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mvlgamma_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d0246bad2cf7bc1f828a3bb9af06d99b7ad97f31 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mvlgamma_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
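As the matmul wrappers above illustrate, every Function.h header forwards to a record in at::_ops, whose call/redispatch pair also appears in the *_ops.h files below. A small equivalence sketch (matmul_demo is our name, not part of the generated code):

    #include <ATen/ATen.h>

    void matmul_demo() {
      at::Tensor a = at::randn({2, 3});
      at::Tensor b = at::randn({3, 4});
      at::Tensor y1 = at::matmul(a, b);               // public wrapper
      at::Tensor y2 = at::_ops::matmul::call(a, b);   // what the wrapper forwards to
      // y1 and y2 take the identical dispatcher path
    }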
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API mvlgamma_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mvlgamma") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t p, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out); +}; + +struct TORCH_API mvlgamma { + using schema = at::Tensor (const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mvlgamma") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mvlgamma(Tensor self, int p) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t p); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p); +}; + +struct TORCH_API mvlgamma_ { + using schema = at::Tensor & (at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mvlgamma_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, int64_t p); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t p); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_batch_norm_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_batch_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..06c902f79187c28e28bdd50f894e4fe4c9a1acf5 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_batch_norm_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API native_batch_norm { + using schema = ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_batch_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps); +}; + +struct TORCH_API native_batch_norm_out { + using schema = ::std::tuple (const at::Tensor &, const c10::optional &, const c10::optional &, const c10::optional &, const c10::optional &, bool, double, double, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_batch_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))") + static ::std::tuple call(const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/pow.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/pow.h new file mode 100644 index 0000000000000000000000000000000000000000..8945464c9b291e5d502f3f7b93af461c486be455 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/pow.h @@ -0,0 +1,70 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow_Tensor_Tensor_out::call(self, exponent, out); +} + +// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) 
+TORCH_API inline at::Tensor & pow_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::pow_Tensor_Tensor_out::call(self, exponent, out); +} + +// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor +TORCH_API inline at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow_Tensor_Tensor::call(self, exponent); +} + +// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & pow_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::pow_Scalar_out::call(self, exponent, out); +} + +// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & pow_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::pow_Scalar_out::call(self, exponent, out); +} + +// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor +TORCH_API inline at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::pow_Scalar::call(self, exponent); +} + +// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow_Tensor_Scalar_out::call(self, exponent, out); +} + +// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) { + return at::_ops::pow_Tensor_Scalar_out::call(self, exponent, out); +} + +// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor +TORCH_API inline at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow_Tensor_Scalar::call(self, exponent); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/quantile.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/quantile.h new file mode 100644 index 0000000000000000000000000000000000000000..1debaa6082e4b100cd6d51dc49fb294719e4381f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/quantile.h @@ -0,0 +1,55 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +TORCH_API inline at::Tensor quantile(const at::Tensor & self, const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile::call(self, q, dim, keepdim, interpolation); +} + +// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_out::call(self, q, dim, keepdim, interpolation, out); +} + +// aten::quantile.out(Tensor self, Tensor q, int? 
dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & quantile_outf(const at::Tensor & self, const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::quantile_out::call(self, q, dim, keepdim, interpolation, out); +} + +// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +TORCH_API inline at::Tensor quantile(const at::Tensor & self, double q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_scalar::call(self, q, dim, keepdim, interpolation); +} + +// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & quantile_out(at::Tensor & out, const at::Tensor & self, double q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_scalar_out::call(self, q, dim, keepdim, interpolation, out); +} + +// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & quantile_outf(const at::Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::quantile_scalar_out::call(self, q, dim, keepdim, interpolation, out); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/quantile_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/quantile_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b71980858bfa1643e5b5919b9e26db8ec3c20b4c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/quantile_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API quantile { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional, bool, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantile") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantile(Tensor self, Tensor q, int? 
dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation); +}; + +struct TORCH_API quantile_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional, bool, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantile") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out); +}; + +struct TORCH_API quantile_scalar { + using schema = at::Tensor (const at::Tensor &, double, c10::optional, bool, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantile") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor") + static at::Tensor call(const at::Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation); +}; + +struct TORCH_API quantile_scalar_out { + using schema = at::Tensor & (const at::Tensor &, double, c10::optional, bool, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantile") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/quantize_per_tensor_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/quantize_per_tensor_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bf2635577eb4f024527e298639d848ed21f38437 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/quantize_per_tensor_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor quantize_per_tensor(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype); +TORCH_API at::Tensor quantize_per_tensor(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype); + +} // namespace cuda +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rnn_tanh.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rnn_tanh.h new file mode 100644 index 0000000000000000000000000000000000000000..6215cb09d5b0b21ddc6c3668e464a7b6d9fc6539 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rnn_tanh.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) +TORCH_API inline ::std::tuple rnn_tanh(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::rnn_tanh_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); +} + +// aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) +TORCH_API inline ::std::tuple rnn_tanh(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::rnn_tanh_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, 
train, bidirectional); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/round_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/round_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9bbeaafa3c77e61db5aa07001e76226a7840e2c1 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/round_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API round { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::round") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "round(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API round_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::round_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "round_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API round_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::round") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "round.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +struct TORCH_API round_decimals { + using schema = at::Tensor (const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::round") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "decimals") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "round.decimals(Tensor self, *, int decimals) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t decimals); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals); +}; + +struct TORCH_API round__decimals { + using schema = at::Tensor & (at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::round_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "decimals") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, int64_t decimals); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t decimals); +}; + +struct TORCH_API round_decimals_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::round") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "decimals_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "round.decimals_out(Tensor self, *, int decimals, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t decimals, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rshift_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rshift_native.h new file mode 100644 index 0000000000000000000000000000000000000000..cf6fb2f490096d97f30d0d05489e8a9dae649551 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/rshift_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor __rshift__(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __irshift__(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor __rshift__(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & __irshift__(at::Tensor & self, const at::Tensor & other); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sinh_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sinh_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..620dd3533f039b353694f20b084dc2079fcad2e0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sinh_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API sinh { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sinh") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sinh(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API sinh_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sinh_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sinh_(Tensor(a!) 
self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API sinh_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sinh") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_backward_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..240bca98a7a44cfed26e6bc809d767f508639500 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/slice_backward_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor slice_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_entr_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_entr_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..039dd3719f721552ec55ad4947932572e8dcdc70 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_entr_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
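The round records above expose a plain overload, an in-place variant, and a decimals overload; a short usage sketch under those schemas (round_demo is an illustrative name):

    #include <ATen/ATen.h>

    void round_demo() {
      at::Tensor x = at::tensor({1.2345, 2.7182});
      at::Tensor r0 = at::round(x);                  // aten::round
      at::Tensor r2 = at::round(x, /*decimals=*/2);  // aten::round.decimals
      x.round_();                                    // aten::round_ (in-place)
    }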
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor special_entr(const at::Tensor & self); +TORCH_API at::Tensor & special_entr_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_entr_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_log_softmax.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_log_softmax.h new file mode 100644 index 0000000000000000000000000000000000000000..73c5272e69ff9d11f42a684b2fb94b7d36f64ce4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_log_softmax.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor +TORCH_API inline at::Tensor special_log_softmax(const at::Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt) { + return at::_ops::special_log_softmax::call(self, dim, dtype); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_logit_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_logit_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4659804730f7954942d888e09f73a2d19c605a3e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_logit_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_logit { + using schema = at::Tensor (const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_logit") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_logit(Tensor self, float? eps=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::optional eps); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional eps); +}; + +struct TORCH_API special_logit_out { + using schema = at::Tensor & (const at::Tensor &, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_logit") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::optional eps, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional eps, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_ndtr_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_ndtr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..34ff507bf09b82a9c8a0b6d4ae5df7ecc3ef49a4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_ndtr_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_ndtr { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_ndtr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_ndtr(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_ndtr_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_ndtr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_sinc_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_sinc_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c1b9e30bcede0db64f456136754ffec1056fb88c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_sinc_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
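For the special_logit and special_ndtr records above, a brief sketch of the corresponding public calls (special_demo is our name; the eps value is an arbitrary example):

    #include <ATen/ATen.h>

    void special_demo() {
      at::Tensor x = at::randn({4});
      at::Tensor cdf = at::special_ndtr(x);                 // standard normal CDF
      at::Tensor p = at::rand({4});
      at::Tensor lg = at::special_logit(p, /*eps=*/1e-6);   // log(p / (1 - p)), p clamped by eps
    }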
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_sinc { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_sinc") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_sinc(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_sinc_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_sinc") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sspaddmm.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sspaddmm.h new file mode 100644 index 0000000000000000000000000000000000000000..4b2e4b12b44640c4349fb5142fd1d523f1fafb39 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sspaddmm.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +TORCH_API inline at::Tensor sspaddmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sspaddmm::call(self, mat1, mat2, beta, alpha); +} + +// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & sspaddmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sspaddmm_out::call(self, mat1, mat2, beta, alpha, out); +} + +// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+TORCH_API inline at::Tensor & sspaddmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::sspaddmm_out::call(self, mat1, mat2, beta, alpha, out); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trace_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trace_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f7bb8a8d4d347183bcd6405c4911f7afc3f9bc30 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trace_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor trace_cpu(const at::Tensor & self); +TORCH_API at::Tensor trace_cuda(const at::Tensor & self); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/triplet_margin_loss.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/triplet_margin_loss.h new file mode 100644 index 0000000000000000000000000000000000000000..ad58cfc7f5bd6e136b09deae8e2d823ae92686fc --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/triplet_margin_loss.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor +TORCH_API inline at::Tensor triplet_margin_loss(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin=1.0, double p=2, double eps=1e-06, bool swap=false, int64_t reduction=at::Reduction::Mean) { + return at::_ops::triplet_margin_loss::call(anchor, positive, negative, margin, p, eps, swap, reduction); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/triu_indices_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/triu_indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b860e1e5e76a5c2fee9b4b1bca93036631480cbc --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/triu_indices_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
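The triplet_margin_loss wrapper above carries schema defaults for every hyperparameter, so the three-tensor call below is complete as written (triplet_demo is an illustrative name):

    #include <ATen/ATen.h>

    void triplet_demo() {
      at::Tensor anchor   = at::randn({8, 16});
      at::Tensor positive = at::randn({8, 16});
      at::Tensor negative = at::randn({8, 16});
      // margin=1.0, p=2, eps=1e-06, swap=false, reduction=Mean are the schema defaults
      at::Tensor loss = at::triplet_margin_loss(anchor, positive, negative);
    }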
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API triu_indices { + using schema = at::Tensor (int64_t, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::triu_indices") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(int64_t row, int64_t col, int64_t offset, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/unfold_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/unfold_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..e3ae77f6a9a122e4e065d5a4cf928c7c5d5886b7 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/unfold_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::unfold_backward(Tensor grad_in, int[] input_sizes, int dim, int size, int step) -> Tensor +TORCH_API inline at::Tensor unfold_backward(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9f106863a0f91203f2802cbf48333605f97ebb69 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
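A usage sketch for the triu_indices record above; per its schema the dtype defaults to long, and the result is a 2 x N coordinate tensor (triu_indices_demo is our name):

    #include <ATen/ATen.h>

    void triu_indices_demo() {
      // (row, col) coordinates of the upper triangle, including the diagonal
      at::Tensor idx = at::triu_indices(/*row=*/3, /*col=*/3, /*offset=*/0);
      // idx.sizes() == {2, 6} for a 3x3 matrix with offset 0
    }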
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_bicubic2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7b49e1722d206f9aacb31c8c1100a36419c21c3b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bicubic2d_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
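A sketch pairing the unfold_backward wrapper shown a little further up with the forward call it undoes (unfold_backward_demo and the concrete sizes are illustrative):

    #include <ATen/ATen.h>

    void unfold_backward_demo() {
      // Gradient for x.unfold(0, /*size=*/3, /*step=*/1) where x had shape {10}:
      // the forward produced shape {8, 3}, so grad_in has that shape.
      at::Tensor grad_in = at::randn({8, 3});
      at::Tensor grad_x = at::unfold_backward(grad_in, /*input_sizes=*/{10},
                                              /*dim=*/0, /*size=*/3, /*step=*/1);
      // grad_x.sizes() == {10}
    }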
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor upsample_bicubic2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_bicubic2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor & upsample_bicubic2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bicubic2d_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bicubic2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..311b9092c084cd464e378aa20eb4942f0e1582b7 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_bicubic2d_native.h @@ -0,0 +1,29 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { + +TORCH_API at::Tensor upsample_bicubic2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); +struct TORCH_API structured_upsample_bicubic2d_out_cpu : public at::meta::structured_upsample_bicubic2d { +void impl(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, const at::Tensor & out); +}; +struct TORCH_API structured_upsample_bicubic2d_out_cuda : public at::meta::structured_upsample_bicubic2d { +void impl(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, const at::Tensor & out); +}; + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..052585e105245435a0aad3626b7635a94536e70d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
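A hedged sketch of what the at::meta:: declarations above are for: tensors on the Meta device carry shape and dtype but no storage, so dispatching through these kernels performs shape inference only. meta_shape_demo and the sizes are our own example values:

    #include <ATen/ATen.h>

    void meta_shape_demo() {
      at::Tensor g = at::empty({1, 3, 8, 8}, at::TensorOptions().device(at::kMeta));
      at::Tensor gi = at::upsample_bicubic2d_backward(
          g, /*output_size=*/{8, 8}, /*input_size=*/{1, 3, 4, 4},
          /*align_corners=*/false);
      // gi.sizes() == {1, 3, 4, 4}; no data was allocated or computed
    }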
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API upsample_nearest3d_backward_vec {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::IntArrayRef, c10::optional<at::ArrayRef<double>>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors);
+};
+
+struct TORCH_API upsample_nearest3d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional<double>, c10::optional<double>, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+};
+
+struct TORCH_API upsample_nearest3d_backward {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional<double>, c10::optional<double>, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_nearest3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/var_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/var_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..52ef9d8a8fc81fcf1a99b968c665e9447629c8f5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/var_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim=false);
+TORCH_API at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim=false);
+TORCH_API at::Tensor & var_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/vdot.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/vdot.h
new file mode 100644
index 0000000000000000000000000000000000000000..69888432c0597ec8da86df7aa17100cbdb3efa47
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/vdot.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/vdot_ops.h>
+
+namespace at {
+
+
+// aten::vdot(Tensor self, Tensor other) -> Tensor
+TORCH_API inline at::Tensor vdot(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::vdot::call(self, other);
+}
+
+// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & vdot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::vdot_out::call(self, other, out);
+}
+
+// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & vdot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::vdot_out::call(self, other, out);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/vdot_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/vdot_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..323c083f5b5346aac90afbb5582c2c8f68aab0de
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/vdot_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor vdot(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_as_real.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_as_real.h
new file mode 100644
index 0000000000000000000000000000000000000000..c71f5d90899456aacbcdd59b11bdb7e4f3914693
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/view_as_real.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/view_as_real_ops.h>
+
+namespace at {
+
+
+// aten::view_as_real(Tensor(a) self) -> Tensor(a)
+TORCH_API inline at::Tensor view_as_real(const at::Tensor & self) {
+    return at::_ops::view_as_real::call(self);
+}
+
+}
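
For orientation, a minimal sketch of how the operators declared in these generated headers are typically consumed from user C++ code. This is illustrative only and not part of the diff above; it assumes a standard libtorch build, where `torch/torch.h` transitively pulls in the generated `ATen/ops/*.h` headers.

```cpp
#include <torch/torch.h>  // transitively includes the generated ATen/ops/*.h headers
#include <iostream>

int main() {
  // aten::vdot — conjugating dot product of two 1-D tensors; resolves through
  // at::_ops::vdot::call as declared in vdot.h / vdot_ops.h above.
  at::Tensor a = torch::randn({4}, torch::kComplexFloat);
  at::Tensor b = torch::randn({4}, torch::kComplexFloat);
  at::Tensor d = at::vdot(a, b);

  // aten::view_as_real — view a complex tensor as a real tensor with an extra
  // trailing dimension of size 2 holding (real, imag) pairs.
  at::Tensor r = at::view_as_real(a);  // shape: {4, 2}

  std::cout << d << "\n" << r.sizes() << "\n";
  return 0;
}
```
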