diff --git a/.gitattributes b/.gitattributes
index 9812a7cc9ec14768f4b119376a1f875f40cd1849..a3f4c938bf23f5ddc3b6756a6868c9f43db6e003 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -573,3 +573,4 @@ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/SimpleITK/_
 my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/lib/libc10_cuda.so filter=lfs diff=lfs merge=lfs -text
 my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/lib/libcudart-a7b20f20.so.11.0 filter=lfs diff=lfs merge=lfs -text
 my_container_sandbox/workspace/anaconda3/bin/python3 filter=lfs diff=lfs merge=lfs -text
+my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/google/protobuf/pyext/_message.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/google/protobuf/pyext/_message.cpython-38-x86_64-linux-gnu.so b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/google/protobuf/pyext/_message.cpython-38-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..04fab65dc4e1472dce955523a2116bfdedec6422
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/google/protobuf/pyext/_message.cpython-38-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e35b49215a789ef70d9dd93830388395dc1a46900604210f340893a102e35e0e
+size 2478152
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_backward_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8a192cb8630f67a4120da9443dccc94c583b924b
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API void _backward(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_forward_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_forward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..32e2d4dec10b0e5ab0f21f0e6bb93d35c6e2dbd4
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_cdist_forward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cdist_forward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, double, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cdist_forward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor")
+  static at::Tensor call(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_atan_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_atan_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b5f41cf8b529416175fc592e258450fe3a080bc1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_atan_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_atan_slow(at::TensorList tensors);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_atan_cuda(at::TensorList tensors);
+TORCH_API ::std::vector<at::Tensor> _foreach_atan_functional(at::TensorList self);
+TORCH_API void foreach_tensor_atan_slow_(at::TensorList self);
+TORCH_API void foreach_tensor_atan_cuda_(at::TensorList self);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_expm1_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_expm1_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..894621ade915f5eb2c0ae33618c2c544826a74f1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_expm1_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_expm1_slow(at::TensorList tensors);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_expm1_cuda(at::TensorList tensors);
+TORCH_API ::std::vector<at::Tensor> _foreach_expm1_functional(at::TensorList self);
+TORCH_API void foreach_tensor_expm1_slow_(at::TensorList self);
+TORCH_API void foreach_tensor_expm1_cuda_(at::TensorList self);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..957daa87239d81dc9b2e51655c10005529422127
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_log_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_log_functional(at::TensorList self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sqrt_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sqrt_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b60148056b46a8a215696e919bd8de69d15c0b3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_sqrt_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_sqrt {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sqrt")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sqrt(Tensor[] tensors) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList tensors);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+struct TORCH_API _foreach_sqrt_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sqrt_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sqrt_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_sqrt_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sqrt")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+struct TORCH_API _foreach_sqrt_functional {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sqrt")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "functional")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sqrt.functional(Tensor[] self) -> Tensor[] self_out")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_trunc_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_trunc_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..58d566c486a8f4d64bf240dd83b7ca5681f8a05e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_foreach_trunc_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_trunc(at::TensorList tensors);
+TORCH_API void _foreach_trunc_(at::TensorList self);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_lstm_mps_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_lstm_mps_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e5add96c4785d512b878b08e393bda1edd188c3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_lstm_mps_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _lstm_mps {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, at::TensorList, at::TensorList, bool, int64_t, double, bool, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_lstm_mps")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_masked_softmax_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_masked_softmax_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..54728bc6ee1b2c06c473db7f13c022372528e04b
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_masked_softmax_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _masked_softmax {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_masked_softmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_masked_softmax(Tensor self, Tensor mask, int? dim=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_pad_enum_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_pad_enum_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..52570a966667dd493840a89ec97c81b03ed5c31c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_pad_enum_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _pad_enum {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_pad_enum")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_pad_enum(Tensor self, int[] pad, int mode, float? value=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef pad, int64_t mode, c10::optional<double> value);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef pad, int64_t mode, c10::optional<double> value);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_pdist_forward_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_pdist_forward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a6e98ecfa642f3790c5f84eb12fcff650ca56976
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_pdist_forward_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _pdist_forward(const at::Tensor & self, double p=2);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sample_dirichlet_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sample_dirichlet_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..65aac36eba3f8e5d0ba9b54a2813133959f6506d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_sample_dirichlet_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sample_dirichlet {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sample_dirichlet")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_slow_conv2d_forward_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_slow_conv2d_forward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bae74fd868d6e17b4f051fff32a15020043c275e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_slow_conv2d_forward_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _slow_conv2d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding);
+TORCH_API at::Tensor & _slow_conv2d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding);
+TORCH_API at::Tensor & _slow_conv2d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output);
+
+} // namespace cuda
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_torch_cuda_cu_linker_symbol_op.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_torch_cuda_cu_linker_symbol_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..569c5e0cee614088e3a7e5d6f72dbeb027516476
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_torch_cuda_cu_linker_symbol_op.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_torch_cuda_cu_linker_symbol_op_ops.h>
+
+namespace at {
+
+
+// aten::_torch_cuda_cu_linker_symbol_op(Tensor self) -> Tensor
+TORCH_API inline at::Tensor _torch_cuda_cu_linker_symbol_op(const at::Tensor & self) {
+    return at::_ops::_torch_cuda_cu_linker_symbol_op::call(self);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_meta_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5d1b905d88cf87f58606dd22e6fe69746780087
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h
new file mode 100644
index 0000000000000000000000000000000000000000..38ff20c7c0ddba70bff23a4224b73087afa661fd
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_validate_sparse_csc_tensor_args_ops.h>
+
+namespace at {
+
+
+// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
+TORCH_API inline void _validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
+    return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/abs_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/abs_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d925864d3289034f52f1418e33b2b4e11f3fe60
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/abs_native.h
@@ -0,0 +1,31 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor abs(const at::Tensor & self);
+TORCH_API at::Tensor & abs_(at::Tensor & self);
+TORCH_API at::Tensor & abs_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor abs_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & abs_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & abs_sparse_(at::Tensor & self);
+TORCH_API at::Tensor abs_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & abs_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & abs_sparse_csr_(at::Tensor & self);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool2d_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool2d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f85e61dd4f6f0b36825a680506b715c1bc7e6596
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/avg_pool2d_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API avg_pool2d_out {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out);
+};
+
+struct TORCH_API avg_pool2d {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool2d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bartlett_window_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bartlett_window_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b133217e240e5bba637733214041baddd620140
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bartlett_window_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor bartlett_window(int64_t window_length, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor bartlett_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/batch_norm_stats.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/batch_norm_stats.h
new file mode 100644
index 0000000000000000000000000000000000000000..72d4b8f2959cd9fd8976551adf2df2feb5fa5bd3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/batch_norm_stats.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/batch_norm_stats_ops.h>
+
+namespace at {
+
+
+// aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
+TORCH_API inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_stats(const at::Tensor & input, double eps) {
+    return at::_ops::batch_norm_stats::call(input, eps);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bilinear_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bilinear_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..230ad6fd7ed338c576ffab97216cbf58aa5398e5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bilinear_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor bilinear(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={});
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bitwise_left_shift_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bitwise_left_shift_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..31b3ad1dd68096b40fdf87eadf7c8e37af919ff6
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/bitwise_left_shift_ops.h
@@ -0,0 +1,94 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API bitwise_left_shift_Tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API bitwise_left_shift__Tensor {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API bitwise_left_shift_Tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API bitwise_left_shift_Tensor_Scalar {
+  using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API bitwise_left_shift__Tensor_Scalar {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+};
+
+struct TORCH_API bitwise_left_shift_Tensor_Scalar_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+};
+
+struct TORCH_API bitwise_left_shift_Scalar_Tensor {
+  using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_left_shift")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Scalar & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/ceil_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/ceil_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf57f017c020e8183907662dd305e028dffb198a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/ceil_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor ceil(const at::Tensor & self);
+TORCH_API at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & ceil_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/chunk_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/chunk_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ee112f074ba1b4a8cf1c095ee47d59636d35facd
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/chunk_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> chunk(const at::Tensor & self, int64_t chunks, int64_t dim=0);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/convolution_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/convolution_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..6a37e7e8807e8c7ed03abf148fc78f8f7badd8fc
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/convolution_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/convolution_backward_ops.h>
+
+namespace at {
+
+
+// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, int[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+TORCH_API inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
+    return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/copy.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..9a93b774714ef8dd289b077550a9b21e543ead39
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/copy.h
@@ -0,0 +1,40 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/copy_ops.h>
+
+namespace at {
+
+
+// aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
+TORCH_API inline at::Tensor copy(const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) {
+    return at::_ops::copy::call(self, src, non_blocking);
+}
+
+// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & copy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) {
+    return at::_ops::copy_out::call(self, src, non_blocking, out);
+}
+
+// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
+TORCH_API inline at::Tensor & copy_outf(const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) {
+    return at::_ops::copy_out::call(self, src, non_blocking, out);
+}
+
+}
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cosh_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cosh_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..eb671173d21101742e86a95d36dc90dc2c2e2ee1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cosh_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API cosh {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cosh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cosh(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API cosh_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cosh_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cosh_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API cosh_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cosh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cross_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cross_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..de9b36bbeda4da429980947514e60ba563d85f46
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/cross_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor cross(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim=c10::nullopt);
+TORCH_API at::Tensor & cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim=c10::nullopt);
+TORCH_API at::Tensor & cross_outf(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/diagflat_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/diagflat_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..23d79106c7f45b82c101d360dd31b3128894d40c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/diagflat_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor diagflat(const at::Tensor & self, int64_t offset=0);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/einsum_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/einsum_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5b6655d692cf3feff34e1da41d3fdac7cbaffd8e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/einsum_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor einsum(c10::string_view equation, at::TensorList tensors);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/expand_as_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/expand_as_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0282ac8b4b3703feb3f5e9fe341c693052c7b211
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/expand_as_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API expand_as {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::expand_as")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "expand_as(Tensor(a) self, Tensor other) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/exponential_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/exponential_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3015e811338898b21e547d47be05a85c711e8d01
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/exponential_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor exponential_functional(const at::Tensor & self, double lambd=1, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..9944c6acf7e23340a33d837340c617fc02dea225
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor fake_quantize_per_tensor_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fbgemm_pack_quantized_matrix_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fbgemm_pack_quantized_matrix_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..59c0b33958f224a4254e38e4b94db8e831990539
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fbgemm_pack_quantized_matrix_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor & input); +TORCH_API at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor & input, int64_t K, int64_t N); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fbgemm_pack_quantized_matrix_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fbgemm_pack_quantized_matrix_native.h new file mode 100644 index 0000000000000000000000000000000000000000..3ce339b42d8e510a169c9207aa51deaa41f15109 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fbgemm_pack_quantized_matrix_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor & input); +TORCH_API at::Tensor fbgemm_pack_quantized_matrix(const at::Tensor & input, int64_t K, int64_t N); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_fftn_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_fftn_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0ba165e13e97d0275d99ba5d3443854a38b57ac2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_fftn_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional norm=c10::nullopt); +TORCH_API at::Tensor & fft_fftn_out(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_ifft2.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_ifft2.h new file mode 100644 index 0000000000000000000000000000000000000000..63b3c53220689fd321141c0199df6cb635076a9a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_ifft2.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +TORCH_API inline at::Tensor fft_ifft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ifft2::call(self, s, dim, norm); +} + +// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+TORCH_API inline at::Tensor & fft_ifft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ifft2_out::call(self, s, dim, norm, out); +} + +// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & fft_ifft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ifft2_out::call(self, s, dim, norm, out); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_rfft2.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_rfft2.h new file mode 100644 index 0000000000000000000000000000000000000000..d3684a0688e4d882fad8d0c06eb0df14fdf0b6e2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_rfft2.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +TORCH_API inline at::Tensor fft_rfft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2::call(self, s, dim, norm); +} + +// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & fft_rfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional norm=c10::nullopt) { + return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out); +} + +// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & fft_rfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_rfft2_out::call(self, s, dim, norm, out); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_rfftn_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_rfftn_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ba59c1635f8cf7c57a75f070cb53122a46816db7 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/fft_rfftn_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
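The fft_ifft2/fft_rfft2 wrappers above show the generated out-variant convention: the "_out" spelling takes the output tensor first with defaulted trailing arguments, "_outf" takes it last with no defaults, and both forward to the same at::_ops::fft_rfft2_out::call. A minimal sketch, assuming a linked libtorch/ATen build:

#include <ATen/ATen.h>

void rfft2_example() {
  at::Tensor x = at::randn({4, 8});
  at::Tensor X = at::fft_rfft2(x);   // defaults: s=None, dim={-2,-1}, norm=None
  at::Tensor out = at::empty_like(X);
  at::fft_rfft2_outf(x, c10::nullopt, {-2, -1}, c10::nullopt, out);  // "outf" spelling, out last
  at::Tensor y = at::fft_ifft2(X);   // inverse 2-D FFT of the complex spectrum
}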
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API fft_rfftn { + using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_rfftn") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm); +}; + +struct TORCH_API fft_rfftn_out { + using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::OptionalIntArrayRef, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_rfftn") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/float_power_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/float_power_native.h new file mode 100644 index 0000000000000000000000000000000000000000..520240c747c7ba2ceb01d716ad71fe9ce4601aa8 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/float_power_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor float_power(const at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor & float_power_out(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out); +TORCH_API at::Tensor & float_power_(at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor float_power(const at::Scalar & self, const at::Tensor & exponent); +TORCH_API at::Tensor & float_power_out(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out); +TORCH_API at::Tensor float_power(const at::Tensor & self, const at::Scalar & exponent); +TORCH_API at::Tensor & float_power_out(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out); +TORCH_API at::Tensor & float_power_(at::Tensor & self, const at::Scalar & exponent); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hamming_window_ops.h 
b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hamming_window_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1d6d78320d10ef37cfeff955cc0835eb4cd4dd8c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hamming_window_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hamming_window { + using schema = at::Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hamming_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API hamming_window_periodic { + using schema = at::Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hamming_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "periodic") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API hamming_window_periodic_alpha { + using schema = at::Tensor (int64_t, bool, double, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hamming_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "periodic_alpha") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor") + static at::Tensor call(int64_t window_length, bool periodic, double alpha, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +struct TORCH_API hamming_window_periodic_alpha_beta { + using schema = at::Tensor (int64_t, bool, double, double, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hamming_window") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "periodic_alpha_beta") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor") + static at::Tensor call(int64_t window_length, bool periodic, double alpha, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hardswish_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hardswish_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..1ed0aba45fadc00ffe8038768be7dfddeab3b203 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hardswish_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor +TORCH_API inline at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardswish_backward::call(grad_output, self); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hardtanh.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hardtanh.h new file mode 100644 index 0000000000000000000000000000000000000000..ce3071fbb0269d9c2c96ef80b4b7b29cdd1ca20f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/hardtanh.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) 
+TORCH_API inline at::Tensor & hardtanh_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_out::call(self, min_val, max_val, out); +} + +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) +TORCH_API inline at::Tensor & hardtanh_outf(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) { + return at::_ops::hardtanh_out::call(self, min_val, max_val, out); +} + +// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor +TORCH_API inline at::Tensor hardtanh(const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh::call(self, min_val, max_val); +} + +// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) +TORCH_API inline at::Tensor & hardtanh_(at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_::call(self, min_val, max_val); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_conj_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_conj_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b20953abe6a665d9f40e59ac413e263397901d89 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/is_conj_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API is_conj { + using schema = bool (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_conj") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_conj(Tensor self) -> bool") + static bool call(const at::Tensor & self); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_backward_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ad2cb96cfb236417ae26f851cbac7143aba3c709 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
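hardtanh above follows the same three spellings (functional, out, in-place); a short sketch, again assuming a linked libtorch build:

#include <ATen/ATen.h>

void hardtanh_example() {
  at::Tensor x = at::randn({5});
  at::Tensor y = at::hardtanh(x);                       // defaults: min_val=-1, max_val=1
  at::hardtanh_(x, /*min_val=*/-0.5, /*max_val=*/0.5);  // in-place variant, custom bounds
}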
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API leaky_relu_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::leaky_relu_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input); +}; + +struct TORCH_API leaky_relu_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::leaky_relu_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3fbc282ab518dc9d8348d5042c79e3a6ad580cad --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/leaky_relu_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
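For the leaky_relu_backward schemas just declared, a hedged sketch of the functional form (assumes libtorch; at::leaky_relu_backward is the generated wrapper from the matching Function.h header, which is not part of this diff hunk, and the all-ones upstream gradient is purely illustrative):

#include <ATen/ATen.h>

at::Tensor leaky_relu_backward_example() {
  at::Tensor x = at::randn({4});
  at::Tensor y = at::leaky_relu(x, /*negative_slope=*/0.01);
  // gradient of leaky_relu at x, applied to an upstream gradient of ones
  return at::leaky_relu_backward(at::ones_like(y), x, 0.01, /*self_is_result=*/false);
}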
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out); +TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01); + +} // namespace cuda +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e8877bf4f3df907b3ff15e55f0a75eaa0279f9d7 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_cholesky_ex_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false); +TORCH_API ::std::tuple linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info); + +} // namespace cuda +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_cond_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_cond_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..898436b3681d9aff9cdf7d1b51c9f8ed0d8c0ca0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/linalg_cond_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_cond { + using schema = at::Tensor (const at::Tensor &, const c10::optional &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_cond") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_cond(Tensor self, Scalar? 
p=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, const c10::optional & p); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p); +}; + +struct TORCH_API linalg_cond_out { + using schema = at::Tensor & (const at::Tensor &, const c10::optional &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_cond") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const c10::optional & p, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & p, at::Tensor & out); +}; + +struct TORCH_API linalg_cond_p_str { + using schema = at::Tensor (const at::Tensor &, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_cond") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "p_str") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_cond.p_str(Tensor self, str p) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::string_view p); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p); +}; + +struct TORCH_API linalg_cond_p_str_out { + using schema = at::Tensor & (const at::Tensor &, c10::string_view, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_cond") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "p_str_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::string_view p, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fc30a3e2f561866689ec1e1e974fb7451acec218 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
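The four linalg_cond overloads above reduce to two public call shapes, a Scalar-or-None p and a string p; a minimal sketch (assumes libtorch; the at::linalg_cond wrappers live in the corresponding Function.h header, not shown in this diff):

#include <ATen/ATen.h>

void linalg_cond_example() {
  at::Tensor A = at::eye(3);
  at::Tensor c2 = at::linalg_cond(A);         // p=None: 2-norm condition number
  at::Tensor cf = at::linalg_cond(A, "fro");  // p_str overload
}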
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mean_cuda_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mean_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..483bd07521346104aea7c6f42dae9f28999bb892 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mean_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor mean(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & mean_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & mean_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mish_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mish_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a6b8a2d6636963593accb024a97df0f015244b7f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mish_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { + +TORCH_API at::Tensor mish(const at::Tensor & self); +TORCH_API at::Tensor & mish_(at::Tensor & self); +struct TORCH_API structured_mish_out : public at::meta::structured_mish { +void impl(const at::Tensor & self, const at::Tensor & out); +}; + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mse_loss_backward_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mse_loss_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..347339bafe236dee5498c571a46a1e64f3bb766b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/mse_loss_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API mse_loss_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mse_loss_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input); +}; + +struct TORCH_API mse_loss_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mse_loss_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_batch_norm_backward.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_batch_norm_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..0f9134c0c54f824972e16af70e6188c048d3b386 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/native_batch_norm_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +TORCH_API inline ::std::tuple native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array output_mask) { + return at::_ops::native_batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/new_empty_strided_ops.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/new_empty_strided_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..96cf265b3fa889f3b0257fb4ef4b8de1103e9910 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/new_empty_strided_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API new_empty_strided { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::new_empty_strided") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "new_empty_strided(Tensor self, int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +}} // namespace at::_ops diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nll_loss2d_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nll_loss2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..189fb39a184401a73ccaadaeca7789593bf49fdd --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nll_loss2d_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100); +TORCH_API at::Tensor & nll_loss2d_out(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nonzero_numpy.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nonzero_numpy.h new file mode 100644 index 0000000000000000000000000000000000000000..0be66bf74e09b62d29b8e1e601f8c2d868a83f8e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/nonzero_numpy.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::nonzero_numpy(Tensor self) -> Tensor[] +TORCH_API inline ::std::vector nonzero_numpy(const at::Tensor & self) { + return at::_ops::nonzero_numpy::call(self); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/prelu_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/prelu_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e9e02e88bde4d9b86d8fa0eb27f0181d1a4f8453 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/prelu_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor prelu_cpu(const at::Tensor & self, const at::Tensor & weight); +TORCH_API at::Tensor prelu_cuda(const at::Tensor & self, const at::Tensor & weight); +TORCH_API at::Tensor mkldnn_prelu(const at::Tensor & self, const at::Tensor & weight); + +} // namespace native +} // namespace at diff --git 
a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/refine_names_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/refine_names_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b7dd6f17fe3ad9fe45bbf27024ceea2856b711c7 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/refine_names_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor refine_names(const at::Tensor & self, at::DimnameList names); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5c8ef8f162daf1c289c3a41d1f831c8b7a543bd1 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/resolve_conj_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor resolve_conj(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/scatter_add_meta.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/scatter_add_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..15665814fa0ee6e4b62bff3dee028ef43c8bb9c0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/scatter_add_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_scatter_add : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src); +}; + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sign_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sign_native.h new file mode 100644 index 0000000000000000000000000000000000000000..36279c745cb50ff4a1f7a5ca2c7fe4087148eb5b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sign_native.h @@ -0,0 +1,33 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { + +TORCH_API at::Tensor sign(const at::Tensor & self); +TORCH_API at::Tensor & sign_(at::Tensor & self); +struct TORCH_API structured_sign_out : public at::meta::structured_sign { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor sign_sparse(const at::Tensor & self); +TORCH_API at::Tensor & sign_sparse_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sign_sparse_(at::Tensor & self); +TORCH_API at::Tensor sign_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor & sign_sparse_csr_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sign_sparse_csr_(at::Tensor & self); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/size_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/size_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0b8a3fc58301808f83e630b80f3ebe252b0ea31d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/size_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API int64_t size(const at::Tensor & self, int64_t dim); +TORCH_API int64_t size(const at::Tensor & self, at::Dimname dim); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sparse_resize_compositeexplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sparse_resize_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..207b64ccfbb0d95ee730a5f006d6dc41a103cf7e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/sparse_resize_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor sparse_resize_functional(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_expm1_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_expm1_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7d961ab94eb108d301b76abba79cee6f031ef866 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_expm1_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor special_expm1(const at::Tensor & self); +TORCH_API at::Tensor & special_expm1_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_expm1_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_expm1_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_expm1_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fba5dec0d4996fe37998d41068d95d36205e4574 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_expm1_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor special_expm1(const at::Tensor & self); +TORCH_API at::Tensor & special_expm1_out(const at::Tensor & self, at::Tensor & out); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_zeta_meta.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_zeta_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..4394c2a1ca9bf97ff39f3b94966339adc4769e4d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/special_zeta_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_zeta : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/split_with_sizes_copy.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/split_with_sizes_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..4581f8401b949d05c600f7ce32da3fb48e017ced --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/split_with_sizes_copy.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::split_with_sizes_copy(Tensor self, int[] split_sizes, int dim=0) -> Tensor[] +TORCH_API inline ::std::vector split_with_sizes_copy(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim); +} + +// aten::split_with_sizes_copy.out(Tensor self, int[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () +TORCH_API inline void split_with_sizes_copy_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) { + return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out); +} + 
+// aten::split_with_sizes_copy.out(Tensor self, int[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () +TORCH_API inline void split_with_sizes_copy_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) { + return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out); +} + +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/squeeze_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/squeeze_native.h new file mode 100644 index 0000000000000000000000000000000000000000..18f6dc987ea0390d3a05cdae81070af0863dc929 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/squeeze_native.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API at::Tensor squeeze(const at::Tensor & self); +TORCH_API at::Tensor squeeze_quantized(const at::Tensor & self); +TORCH_API at::Tensor & squeeze_(at::Tensor & self); +TORCH_API at::Tensor squeeze(const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor squeeze_quantized(const at::Tensor & self, int64_t dim); +TORCH_API at::Tensor & squeeze_(at::Tensor & self, int64_t dim); +TORCH_API at::Tensor squeeze(const at::Tensor & self, at::Dimname dim); +TORCH_API at::Tensor & squeeze_(at::Tensor & self, at::Dimname dim); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/stride_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/stride_native.h new file mode 100644 index 0000000000000000000000000000000000000000..76456224aebe19e141df8c53dbe5c9c71f19f6b2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/stride_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { + +TORCH_API int64_t stride(const at::Tensor & self, int64_t dim); +TORCH_API int64_t stride(const at::Tensor & self, at::Dimname dim); + +} // namespace native +} // namespace at diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/swapaxes_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/swapaxes_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f05972c32a4a7aa87f61f55cf2c2bbbd8d7301a2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/swapaxes_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor swapaxes(const at::Tensor & self, int64_t axis0, int64_t axis1);
+TORCH_API at::Tensor & swapaxes_(at::Tensor & self, int64_t axis0, int64_t axis1);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trace_backward_native.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trace_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..21b3a7215609b5d0e371cefacd3c51d7492d5b00
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/trace_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+
+TORCH_API at::Tensor trace_backward(const at::Tensor & grad, at::IntArrayRef sizes);
+
+} // namespace native
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/uniform_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/uniform_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c86e0483fe12b93729d646b76a46d758e8e76029
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/uniform_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & uniform_(at::Tensor & self, double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..41549bc2efecf525aa6e62c40f0131d905698075
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors);
+TORCH_API at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward_compositeimplicitautograd_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f855108df0bd5368b7715479d463f6efd25496fe
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/vdot_cpu_dispatch.h b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/vdot_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..29eadc66409f11511d66c544efca700d6bf35980
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch/include/ATen/ops/vdot_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor vdot(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/tmp_inputs_32_24/case00006.nii.gz b/tmp_inputs_32_24/case00006.nii.gz
new file mode 100644
index 0000000000000000000000000000000000000000..d7f3e508d05292779e7d830f5a56371eb9556713
--- /dev/null
+++ b/tmp_inputs_32_24/case00006.nii.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0df4d611d2fff177e1dc912a287e08dc0ce968b44d9453f2f49f4e3728e3de4e
+size 40603925
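
For reference, a minimal usage sketch for a few of the operators whose generated headers are vendored above, written against the public ATen C++ API. This is illustrative only and not part of the patch: it assumes a linked libtorch build recent enough to ship split_with_sizes_copy (roughly 1.13+, matching these headers), and the file name, build flags, and shapes below are our own. In normal use these per-op headers are pulled in transitively via torch/torch.h or ATen/ATen.h rather than included one by one.

// example_ops_usage.cpp -- illustrative sketch, not part of the vendored tree.
// Build (assumption): g++ -std=c++17 example_ops_usage.cpp -ltorch -ltorch_cpu -lc10
#include <torch/torch.h>
#include <iostream>
#include <vector>

int main() {
  // uniform_ (CPU dispatch header above): in-place fill from U[from, to).
  at::Tensor t = at::empty({2, 6});
  t.uniform_(0.0, 1.0);

  // swapaxes (compositeimplicitautograd header above): NumPy-style alias of transpose.
  at::Tensor s = at::swapaxes(t, 0, 1);  // shape {6, 2}, a view of t

  // split_with_sizes_copy: like split_with_sizes, but the pieces are copies,
  // not views -- the schema's plain "Tensor[]" return carries no alias annotation.
  std::vector<at::Tensor> parts = at::split_with_sizes_copy(t, {2, 4}, /*dim=*/1);

  // vdot (CPU dispatch header above): dot product of two 1-D tensors.
  at::Tensor d = at::vdot(at::rand({8}), at::rand({8}));

  // special_expm1: exp(x) - 1, computed accurately near zero.
  at::Tensor e = at::special_expm1(t);

  std::cout << s.sizes() << " " << parts.size() << " "
            << d.item<float>() << " " << e.sizes() << "\n";
  return 0;
}

Note that the _out/_outf pairs declared in the headers above differ only in argument order (out-first with defaulted trailing arguments versus out-last with none), which is a torchgen convention for the same underlying operator rather than two distinct ops.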