| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
|
|
| #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| #if defined(_MSC_VER)
|
| #pragma message("crt/sm_80_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| #else
|
| #warning "crt/sm_80_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| #endif
|
| #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__
|
| #endif
|
|
|
| #if !defined(__SM_80_RT_HPP__)
|
| #define __SM_80_RT_HPP__
|
|
|
| #if defined(__CUDACC_RTC__)
|
| #define __SM_80_RT_DECL__ __host__ __device__
|
| #else
|
| #define __SM_80_RT_DECL__ static __device__ __inline__
|
| #endif
|
|
|
| #if defined(__cplusplus) && defined(__CUDACC__)
|
|
|
| #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
|
|
|
| |
| |
| |
| |
|
|
|
|
| #include "builtin_types.h"
|
| #include "device_types.h"
|
| #include "host_defines.h"
|
|
|
| |
| |
| |
| |
| |
|
|
|
|
/*
 * Forward declarations of the compiler-provided builtin implementations
 * backing the SM80+ warp reduction intrinsics defined below in this header.
 * These symbols are resolved by the CUDA compiler itself (__device_builtin__);
 * they are never defined in user-visible source. Declared extern "C" so the
 * names are unmangled and match the compiler's builtin symbol table.
 *
 * Common shape: first parameter is the participation mask, second is the
 * per-lane value to reduce. Signed and unsigned variants are separate
 * builtins because overload resolution happens in the C++ wrappers below.
 */
extern "C" {

__device_builtin__ __device__ unsigned __reduce_add_sync_unsigned_impl(unsigned, unsigned);

__device_builtin__ __device__ unsigned __reduce_min_sync_unsigned_impl(unsigned, unsigned);

__device_builtin__ __device__ unsigned __reduce_max_sync_unsigned_impl(unsigned, unsigned);

__device_builtin__ __device__ int __reduce_add_sync_signed_impl(unsigned, int);

__device_builtin__ __device__ int __reduce_min_sync_signed_impl(unsigned, int);

__device_builtin__ __device__ int __reduce_max_sync_signed_impl(unsigned, int);

__device_builtin__ __device__ unsigned __reduce_or_sync_unsigned_impl(unsigned, unsigned);

__device_builtin__ __device__ unsigned __reduce_and_sync_unsigned_impl(unsigned, unsigned);

__device_builtin__ __device__ unsigned __reduce_xor_sync_unsigned_impl(unsigned, unsigned);

}
|
|
|
/*
 * Warp-level add reduction, unsigned overload (SM80+; the enclosing
 * #if guards on __CUDA_ARCH__ >= 800). Thin forwarding wrapper over the
 * compiler builtin __reduce_add_sync_unsigned_impl; `mask` names the
 * participating lanes, `value` is this lane's operand.
 */
__SM_80_RT_DECL__ unsigned __reduce_add_sync(unsigned mask, unsigned value)
{
    const unsigned reduced = __reduce_add_sync_unsigned_impl(mask, value);
    return reduced;
}
|
|
|
/*
 * Warp-level min reduction, unsigned overload (SM80+). Thin forwarding
 * wrapper over the compiler builtin __reduce_min_sync_unsigned_impl;
 * `mask` names the participating lanes, `value` is this lane's operand.
 */
__SM_80_RT_DECL__ unsigned __reduce_min_sync(unsigned mask, unsigned value)
{
    const unsigned reduced = __reduce_min_sync_unsigned_impl(mask, value);
    return reduced;
}
|
|
|
/*
 * Warp-level max reduction, unsigned overload (SM80+). Thin forwarding
 * wrapper over the compiler builtin __reduce_max_sync_unsigned_impl;
 * `mask` names the participating lanes, `value` is this lane's operand.
 */
__SM_80_RT_DECL__ unsigned __reduce_max_sync(unsigned mask, unsigned value)
{
    const unsigned reduced = __reduce_max_sync_unsigned_impl(mask, value);
    return reduced;
}
|
|
|
/*
 * Warp-level add reduction, signed-int overload (SM80+). Thin forwarding
 * wrapper over the compiler builtin __reduce_add_sync_signed_impl;
 * `mask` names the participating lanes, `value` is this lane's operand.
 */
__SM_80_RT_DECL__ int __reduce_add_sync(unsigned mask, int value)
{
    const int reduced = __reduce_add_sync_signed_impl(mask, value);
    return reduced;
}
|
|
|
/*
 * Warp-level min reduction, signed-int overload (SM80+). Thin forwarding
 * wrapper over the compiler builtin __reduce_min_sync_signed_impl;
 * `mask` names the participating lanes, `value` is this lane's operand.
 */
__SM_80_RT_DECL__ int __reduce_min_sync(unsigned mask, int value)
{
    const int reduced = __reduce_min_sync_signed_impl(mask, value);
    return reduced;
}
|
|
|
/*
 * Warp-level max reduction, signed-int overload (SM80+). Thin forwarding
 * wrapper over the compiler builtin __reduce_max_sync_signed_impl;
 * `mask` names the participating lanes, `value` is this lane's operand.
 */
__SM_80_RT_DECL__ int __reduce_max_sync(unsigned mask, int value)
{
    const int reduced = __reduce_max_sync_signed_impl(mask, value);
    return reduced;
}
|
|
|
/*
 * Warp-level bitwise-AND reduction (SM80+). Thin forwarding wrapper over
 * the compiler builtin __reduce_and_sync_unsigned_impl; `mask` names the
 * participating lanes, `value` is this lane's operand.
 */
__SM_80_RT_DECL__ unsigned __reduce_and_sync(unsigned mask, unsigned value)
{
    const unsigned reduced = __reduce_and_sync_unsigned_impl(mask, value);
    return reduced;
}
|
|
|
/*
 * Warp-level bitwise-OR reduction (SM80+). Thin forwarding wrapper over
 * the compiler builtin __reduce_or_sync_unsigned_impl; `mask` names the
 * participating lanes, `value` is this lane's operand.
 */
__SM_80_RT_DECL__ unsigned __reduce_or_sync(unsigned mask, unsigned value)
{
    const unsigned reduced = __reduce_or_sync_unsigned_impl(mask, value);
    return reduced;
}
|
|
|
/*
 * Warp-level bitwise-XOR reduction (SM80+). Thin forwarding wrapper over
 * the compiler builtin __reduce_xor_sync_unsigned_impl; `mask` names the
 * participating lanes, `value` is this lane's operand.
 */
__SM_80_RT_DECL__ unsigned __reduce_xor_sync(unsigned mask, unsigned value)
{
    const unsigned reduced = __reduce_xor_sync_unsigned_impl(mask, value);
    return reduced;
}
|
| #endif
|
|
|
| #endif
|
|
|
| #undef __SM_80_RT_DECL__
|
|
|
| #endif
|
|
|
| #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__)
|
| #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__
|
| #endif
|
|
|