diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/common_functions.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/common_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..b7e70950fb51d0d58f8dd99239e6b36ba89c4779 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/common_functions.h @@ -0,0 +1,310 @@ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/common_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/common_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__ +#endif + +#if !defined(__COMMON_FUNCTIONS_H__) +#define __COMMON_FUNCTIONS_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#include "builtin_types.h" +#include "host_defines.h" + +#define __CUDACC_VER__ "__CUDACC_VER__ is no longer supported. Use __CUDACC_VER_MAJOR__, __CUDACC_VER_MINOR__, and __CUDACC_VER_BUILD__ instead." + +#ifndef __CUDA_API_VER_MAJOR__ +#define __CUDA_API_VER_MAJOR__ __CUDACC_VER_MAJOR__ +#endif /* __CUDA_API_VER_MAJOR__ */ + +#ifndef __CUDA_API_VER_MINOR__ +#define __CUDA_API_VER_MINOR__ __CUDACC_VER_MINOR__ +#endif /* __CUDA_API_VER_MINOR__ */ + +#if !defined(__CUDACC_RTC__) +#include +#include + +extern "C" +{ +#endif /* !__CUDACC_RTC__ */ +extern _CRTIMP __host__ __device__ __device_builtin__ __cudart_builtin__ clock_t __cdecl clock(void) +#if defined(__QNX__) +asm("clock32") +#endif +__THROW; +extern __host__ __device__ __device_builtin__ __cudart_builtin__ void* __cdecl memset(void*, int, size_t) __THROW; +extern __host__ __device__ __device_builtin__ __cudart_builtin__ void* __cdecl memcpy(void*, const void*, size_t) __THROW; +#if !defined(__CUDACC_RTC__) +} +#endif /* !__CUDACC_RTC__ */ + +#if defined(__CUDA_ARCH__) + +#if defined(__CUDACC_RTC__) +inline __host__ __device__ void* operator new(size_t, void *p) { return p; } +inline __host__ __device__ void* operator new[](size_t, void *p) { return p; } +inline __host__ __device__ void operator delete(void*, void*) { } +inline __host__ __device__ void operator delete[](void*, void*) { } +#else /* !__CUDACC_RTC__ */ +#ifndef __CUDA_INTERNAL_SKIP_CPP_HEADERS__ +#include +#endif + +#if defined (__GNUC__) + +#define STD \ + std:: + +#else /* __GNUC__ */ + +#define STD 
+ +#endif /* __GNUC__ */ + +extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t, void*) throw(); +extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t, void*) throw(); +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, void*) throw(); +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, void*) throw(); +# if __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__) +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t) throw(); +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t) throw(); +#endif /* __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__) */ +#endif /* __CUDACC_RTC__ */ + +#if !defined(__CUDACC_RTC__) +#include +#include +#endif /* !__CUDACC_RTC__ */ + +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +namespace std { +#endif +extern "C" +{ +extern +#if !defined(_MSC_VER) || _MSC_VER < 1900 +_CRTIMP +#endif + +#if defined(__GLIBC__) && defined(__GLIBC_MINOR__) && ( (__GLIBC__ < 2) || ( (__GLIBC__ == 2) && (__GLIBC_MINOR__ < 3) ) ) +__host__ __device__ __device_builtin__ __cudart_builtin__ int __cdecl printf(const char*, ...) 
__THROW; +#else /* newer glibc */ +__host__ __device__ __device_builtin__ __cudart_builtin__ int __cdecl printf(const char*, ...); +#endif /* defined(__GLIBC__) && defined(__GLIBC_MINOR__) && ( (__GLIBC__ < 2) || ( (__GLIBC__ == 2) && (__GLIBC_MINOR__ < 3) ) ) */ + + +extern _CRTIMP __host__ __device__ __cudart_builtin__ void* __cdecl malloc(size_t) __THROW; +extern _CRTIMP __host__ __device__ __cudart_builtin__ void __cdecl free(void*) __THROW; + +#if defined(_MSC_VER) +extern __host__ __device__ __cudart_builtin__ void* __cdecl _alloca(size_t); +#endif + +#if defined(__QNX__) +#undef alloca +#define alloca(__S) __builtin_alloca(__S) +#endif +} +#if defined(__QNX__) && !defined(_LIBCPP_VERSION) +} /* std */ +#endif + +#if !defined(__CUDACC_RTC__) +#include +#endif /* !__CUDACC_RTC__ */ + +extern "C" +{ +#if defined(__CUDACC_RTC__) +extern __host__ __device__ void __assertfail(const char * __assertion, + const char *__file, + unsigned int __line, + const char *__function, + size_t charsize); +#elif defined(__APPLE__) +#define __builtin_expect(exp,c) (exp) +extern __host__ __device__ __cudart_builtin__ void __assert_rtn( + const char *, const char *, int, const char *); +#elif defined(__ANDROID__) +extern __host__ __device__ __cudart_builtin__ void __assert2( + const char *, int, const char *, const char *); +#elif defined(__QNX__) +#if !defined(_LIBCPP_VERSION) +namespace std { +#endif +extern __host__ __device__ __cudart_builtin__ void __assert( + const char *, const char *, unsigned int, const char *); +#if !defined(_LIBCPP_VERSION) +} +#endif +#elif defined(__HORIZON__) +extern __host__ __device__ __cudart_builtin__ void __assert_fail( + const char *, const char *, int, const char *); +#elif defined(__GNUC__) +extern __host__ __device__ __cudart_builtin__ void __assert_fail( + const char *, const char *, unsigned int, const char *) + __THROW; +#elif defined(_WIN32) +extern __host__ __device__ __cudart_builtin__ _CRTIMP void __cdecl _wassert( + const wchar_t *, 
const wchar_t *, unsigned); +#endif +} + +#if defined(__CUDACC_RTC__) +#ifdef NDEBUG +#define assert(e) (static_cast(0)) +#else /* !NDEBUG */ +#define __ASSERT_STR_HELPER(x) #x +#define assert(e) ((e) ? static_cast(0)\ + : __assertfail(__ASSERT_STR_HELPER(e), __FILE__,\ + __LINE__, __PRETTY_FUNCTION__,\ + sizeof(char))) +#endif /* NDEBUG */ +__host__ __device__ void* operator new(size_t); +__host__ __device__ void* operator new[](size_t); +__host__ __device__ void operator delete(void*); +__host__ __device__ void operator delete[](void*); +# if __cplusplus >= 201402L +__host__ __device__ void operator delete(void*, size_t); +__host__ __device__ void operator delete[](void*, size_t); +#endif /* __cplusplus >= 201402L */ + +#if __cplusplus >= 201703L +namespace std { enum class align_val_t : size_t {}; } +__host__ __device__ void* __cdecl operator new(size_t sz, std::align_val_t) noexcept; +__host__ __device__ void* __cdecl operator new[](size_t sz, std::align_val_t) noexcept; +__host__ __device__ void __cdecl operator delete(void* ptr, std::align_val_t) noexcept; +__host__ __device__ void __cdecl operator delete[](void* ptr, std::align_val_t) noexcept; +__host__ __device__ void __cdecl operator delete(void* ptr, size_t, std::align_val_t) noexcept; +__host__ __device__ void __cdecl operator delete[](void* ptr, size_t, std::align_val_t) noexcept; +#endif /* __cplusplus >= 201703L */ + +#else /* !__CUDACC_RTC__ */ +#if defined (__GNUC__) + +#define __NV_GLIBCXX_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) + +#if (__cplusplus >= 201103L) && ((!(defined(__QNX__) && defined(_LIBCPP_VERSION))) || (defined(__QNX__) && __NV_GLIBCXX_VERSION >= 80300)) +#define THROWBADALLOC +#else +#if defined(__ANDROID__) && !defined(_LIBCPP_VERSION) && (defined(__BIONIC__) || __NV_GLIBCXX_VERSION < 40900) +#define THROWBADALLOC +#else +#define THROWBADALLOC throw(STD bad_alloc) +#endif +#endif +#define __DELETE_THROW throw() + +#undef __NV_GLIBCXX_VERSION + +#else 
/* __GNUC__ */ + +#define THROWBADALLOC throw(...) + +#endif /* __GNUC__ */ + +extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t) THROWBADALLOC; +extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t) THROWBADALLOC; +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*) throw(); +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*) throw(); +# if __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__) +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t) throw(); +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t) throw(); +#endif /* __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__) */ + +#if __cpp_aligned_new +extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t, std::align_val_t); +extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t, std::align_val_t); +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, std::align_val_t) noexcept; +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, std::align_val_t) noexcept; +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t, std::align_val_t) noexcept; +extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t, std::align_val_t) noexcept; +#endif /* __cpp_aligned_new */ + +#undef THROWBADALLOC +#undef STD +#endif /* __CUDACC_RTC__ */ + +#endif /* __CUDA_ARCH__ */ + +#endif /* __cplusplus && __CUDACC__ */ + +/******************************************************************************* +* * +* * +* * 
+*******************************************************************************/ + +#if defined(__CUDACC_RTC__) && (__CUDA_ARCH__ >= 350) +#include "cuda_device_runtime_api.h" +#endif + +#include "math_functions.h" + +#endif /* !__COMMON_FUNCTIONS_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/cudacc_ext.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/cudacc_ext.h new file mode 100644 index 0000000000000000000000000000000000000000..8d244463e73f0f7569a4707002c8e059bca67c6d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/cudacc_ext.h @@ -0,0 +1,64 @@ +/* + * Copyright 2021-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. 
+ * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/cudacc_ext.h is an internal header file and must not be used directly. 
Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/cudacc_ext.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__ +#endif + + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_functions.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..cf5704278e7e3bf7ee6d10ea1d794fa31a63e7fc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_functions.h @@ -0,0 +1,3651 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. 
+ * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/device_functions.h is an internal header file and must not be used directly. 
Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/device_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H__ +#endif + +#if !defined(__DEVICE_FUNCTIONS_H__) +#define __DEVICE_FUNCTIONS_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(__CUDACC_RTC__) +#define __DEVICE_FUNCTIONS_DECL__ __device__ __cudart_builtin__ +#define __DEVICE_FUNCTIONS_STATIC_DECL__ __device__ __cudart_builtin__ +#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ __device__ __host__ __cudart_builtin__ +#else +#define __DEVICE_FUNCTIONS_DECL__ __device__ __cudart_builtin__ +#define __DEVICE_FUNCTIONS_STATIC_DECL__ static __inline__ __device__ __cudart_builtin__ +#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ static __inline__ __device__ __host__ __cudart_builtin__ +#endif /* __CUDACC_RTC__ */ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + + +//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time) +#define EXCLUDE_FROM_RTC + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +extern "C" +{ +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Calculate the most significant 32 bits of the product of the two 32-bit integers. + * + * Calculate the most significant 32 bits of the 64-bit product \p x * \p y, where \p x and \p y + * are 32-bit integers. + * + * \return Returns the most significant 32 bits of the product \p x * \p y. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __mulhi(int x, int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Calculate the most significant 32 bits of the product of the two 32-bit unsigned integers. + * + * Calculate the most significant 32 bits of the 64-bit product \p x * \p y, where \p x and \p y + * are 32-bit unsigned integers. + * + * \return Returns the most significant 32 bits of the product \p x * \p y. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __umulhi(unsigned int x, unsigned int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Calculate the most significant 64 bits of the product of the two 64-bit integers. + * + * Calculate the most significant 64 bits of the 128-bit product \p x * \p y, where \p x and \p y + * are 64-bit integers. + * + * \return Returns the most significant 64 bits of the product \p x * \p y. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __mul64hi(long long int x, long long int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Calculate the most significant 64 bits of the product of the two 64 unsigned bit integers. + * + * Calculate the most significant 64 bits of the 128-bit product \p x * \p y, where \p x and \p y + * are 64-bit unsigned integers. + * + * \return Returns the most significant 64 bits of the product \p x * \p y. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __umul64hi(unsigned long long int x, unsigned long long int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret bits in an integer as a float. + * + * Reinterpret the bits in the signed integer value \p x as a single-precision + * floating-point value. + * \return Returns reinterpreted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __int_as_float(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret bits in a float as a signed integer. 
+ * + * Reinterpret the bits in the single-precision floating-point value \p x + * as a signed integer. + * \return Returns reinterpreted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __float_as_int(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret bits in an unsigned integer as a float. + * + * Reinterpret the bits in the unsigned integer value \p x as a single-precision + * floating-point value. + * \return Returns reinterpreted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __uint_as_float(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Reinterpret bits in a float as a unsigned integer. + * + * Reinterpret the bits in the single-precision floating-point value \p x + * as a unsigned integer. + * \return Returns reinterpreted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __float_as_uint(float x); +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ void __syncthreads(void); +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ void __prof_trigger(int); +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ void __threadfence(void); +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ void __threadfence_block(void); +__DEVICE_FUNCTIONS_DECL__ +#if defined(__GNUC__) || defined(__CUDACC_RTC__) +__attribute__((__noreturn__)) +#elif defined(_MSC_VER) +__declspec(noreturn) +#endif /* defined(__GNUC__) || defined(__CUDACC_RTC__) */ +__device_builtin__ void __trap(void); +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ void __brkpt(); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Clamp the input argument to [+0.0, 1.0]. + * + * Clamp the input argument \p x to be within the interval [+0.0, 1.0]. + * \return + * - __saturatef(\p x) returns 0 if \p x < 0. + * - __saturatef(\p x) returns 1 if \p x > 1. + * - __saturatef(\p x) returns \p x if + * \latexonly $0 \le x \le 1$ \endlatexonly + * \xmlonly + * + * + * 0 + * + * x + * + * 1 + * + * \endxmlonly. + * - __saturatef(NaN) returns 0. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __saturatef(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Calculate + * \latexonly $|x - y| + z$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * y + * + * | + * + * + + * z + * + * + * \endxmlonly + * , the sum of absolute difference. + * + * Calculate + * \latexonly $|x - y| + z$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * y + * + * | + * + * + + * z + * + * + * \endxmlonly + * , the 32-bit sum of the third argument \p z plus and the absolute + * value of the difference between the first argument, \p x, and second + * argument, \p y. + * + * Inputs \p x and \p y are signed 32-bit integers, input \p z is + * a 32-bit unsigned integer. + * + * \return Returns + * \latexonly $|x - y| + z$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * y + * + * | + * + * + + * z + * + * \endxmlonly. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __sad(int x, int y, unsigned int z); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Calculate + * \latexonly $|x - y| + z$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * y + * + * | + * + * + + * z + * + * + * \endxmlonly + * , the sum of absolute difference. + * + * Calculate + * \latexonly $|x - y| + z$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * y + * + * | + * + * + + * z + * + * + * \endxmlonly + * , the 32-bit sum of the third argument \p z plus and the absolute + * value of the difference between the first argument, \p x, and second + * argument, \p y. + * + * Inputs \p x, \p y, and \p z are unsigned 32-bit integers. + * + * \return Returns + * \latexonly $|x - y| + z$ \endlatexonly + * \xmlonly + * + * + * + * | + * + * x + * + * y + * + * | + * + * + + * z + * + * \endxmlonly. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __usad(unsigned int x, unsigned int y, unsigned int z); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Calculate the least significant 32 bits of the product of the least significant 24 bits of two integers. + * + * Calculate the least significant 32 bits of the product of the least significant 24 bits of \p x and \p y. + * The high order 8 bits of \p x and \p y are ignored. + * + * \return Returns the least significant 32 bits of the product \p x * \p y. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __mul24(int x, int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Calculate the least significant 32 bits of the product of the least significant 24 bits of two unsigned integers. + * + * Calculate the least significant 32 bits of the product of the least significant 24 bits of \p x and \p y. + * The high order 8 bits of \p x and \p y are ignored. + * + * \return Returns the least significant 32 bits of the product \p x * \p y. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __umul24(unsigned int x, unsigned int y); +/** + * \ingroup CUDA_MATH_SINGLE + * \brief Divide two floating-point values. + * + * Compute \p x divided by \p y. If --use_fast_math is specified, + * use ::__fdividef() for higher performance, otherwise use normal division. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_single + * \note_fastmath + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float fdividef(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Calculate the fast approximate division of the input arguments. + * + * Calculate the fast approximate division of \p x by \p y. + * + * \return Returns \p x / \p y. 
+ * - __fdividef( + * \latexonly $\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly + * , \p y) returns NaN for + * \latexonly $2^{126} < |y| < 2^{128}$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * + * 126 + * + * + * < + * |y| + * < + * + * 2 + * + * 128 + * + * + * + * \endxmlonly. + * - __fdividef(\p x, \p y) returns 0 for + * \latexonly $2^{126} < |y| < 2^{128}$ \endlatexonly + * \xmlonly + * + * + * + * 2 + * + * 126 + * + * + * < + * |y| + * < + * + * 2 + * + * 128 + * + * + * + * + * \endxmlonly + * and finite + * \latexonly $x$ \endlatexonly + * \xmlonly + * + * + * x + * + * \endxmlonly. + * + * \note_accuracy_single_intrinsic + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fdividef(float x, float y); +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ double fdivide(double x, double y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Calculate the fast approximate sine of the input argument. + * + * Calculate the fast approximate sine of the input argument \p x, measured in radians. + * + * \return Returns the approximate sine of \p x. + * + * \note_accuracy_single_intrinsic + * \note Output in the denormal range is flushed to sign preserving 0.0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __sinf(float x) __THROW; +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Calculate the fast approximate cosine of the input argument. + * + * Calculate the fast approximate cosine of the input argument \p x, measured in radians. + * + * \return Returns the approximate cosine of \p x. + * + * \note_accuracy_single_intrinsic + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __cosf(float x) __THROW; +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Calculate the fast approximate tangent of the input argument. + * + * Calculate the fast approximate tangent of the input argument \p x, measured in radians. + * + * \return Returns the approximate tangent of \p x. 
+ * + * \note_accuracy_single_intrinsic + * \note The result is computed as the fast divide of ::__sinf() + * by ::__cosf(). Denormal output is flushed to sign-preserving 0.0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __tanf(float x) __THROW; +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Calculate the fast approximate of sine and cosine of the first input argument. + * + * Calculate the fast approximate of sine and cosine of the first input argument \p x (measured + * in radians). The results for sine and cosine are written into the second + * argument, \p sptr, and, respectively, third argument, \p cptr. + * + * \return + * - none + * + * \note_accuracy_single_intrinsic + * \note Denorm input/output is flushed to sign preserving 0.0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ void __sincosf(float x, float *sptr, float *cptr) __THROW; +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Calculate the fast approximate base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument. + * + * Calculate the fast approximate base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * exponential of the input argument \p x, + * \latexonly $e^x$ \endlatexonly + * \xmlonly + * + * + * + * e + * x + * + * + * \endxmlonly. + * + * \return Returns an approximation to + * \latexonly $e^x$ \endlatexonly + * \xmlonly + * + * + * + * e + * x + * + * + * \endxmlonly. + * + * \note_accuracy_single_intrinsic + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __expf(float x) __THROW; +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Calculate the fast approximate base 10 exponential of the input argument. + * + * Calculate the fast approximate base 10 exponential of the input argument \p x, + * \latexonly $10^x$ \endlatexonly + * \xmlonly + * + * + * + * 10 + * x + * + * + * \endxmlonly. 
+ * + * \return Returns an approximation to + * \latexonly $10^x$ \endlatexonly + * \xmlonly + * + * + * + * 10 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_single_intrinsic + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __exp10f(float x) __THROW; +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Calculate the fast approximate base 2 logarithm of the input argument. + * + * Calculate the fast approximate base 2 logarithm of the input argument \p x. + * + * \return Returns an approximation to + * \latexonly $\log_2(x)$ \endlatexonly + * \xmlonly + * + * + * + * log + * 2 + * + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \note_accuracy_single_intrinsic + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __log2f(float x) __THROW; +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Calculate the fast approximate base 10 logarithm of the input argument. + * + * Calculate the fast approximate base 10 logarithm of the input argument \p x. + * + * \return Returns an approximation to + * \latexonly $\log_{10}(x)$ \endlatexonly + * \xmlonly + * + * + * + * log + * + * 10 + * + * + * + * ( + * x + * ) + * + * \endxmlonly. + * + * \note_accuracy_single_intrinsic + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __log10f(float x) __THROW; +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Calculate the fast approximate base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * logarithm of the input argument. + * + * Calculate the fast approximate base + * \latexonly $e$ \endlatexonly + * \xmlonly + * + * + * e + * + * + * \endxmlonly + * logarithm of the input argument \p x. + * + * \return Returns an approximation to + * \latexonly $\log_e(x)$ \endlatexonly + * \xmlonly + * + * + * + * log + * e + * + * + * ( + * x + * ) + * + * \endxmlonly. 
+ * + * \note_accuracy_single_intrinsic + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __logf(float x) __THROW; +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Calculate the fast approximate of + * \latexonly $x^y$ \endlatexonly + * \xmlonly + * + * + * + * x + * y + * + * + * \endxmlonly. + * + * Calculate the fast approximate of \p x, the first input argument, + * raised to the power of \p y, the second input argument, + * \latexonly $x^y$ \endlatexonly + * \xmlonly + * + * + * + * x + * y + * + * + * \endxmlonly. + * + * \return Returns an approximation to + * \latexonly $x^y$ \endlatexonly + * \xmlonly + * + * + * + * x + * y + * + * + * \endxmlonly. + * + * \note_accuracy_single_intrinsic + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ __cudart_builtin__ float __powf(float x, float y) __THROW; +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed integer in round-to-nearest-even mode. + * + * Convert the single-precision floating-point value \p x to a signed integer + * in round-to-nearest-even mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __float2int_rn(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed integer in round-towards-zero mode. + * + * Convert the single-precision floating-point value \p x to a signed integer + * in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __float2int_rz(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed integer in round-up mode. + * + * Convert the single-precision floating-point value \p x to a signed integer + * in round-up (to positive infinity) mode. + * \return Returns converted value. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __float2int_ru(float); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed integer in round-down mode. + * + * Convert the single-precision floating-point value \p x to a signed integer + * in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __float2int_rd(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to an unsigned integer in round-to-nearest-even mode. + * + * Convert the single-precision floating-point value \p x to an unsigned integer + * in round-to-nearest-even mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __float2uint_rn(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to an unsigned integer in round-towards-zero mode. + * + * Convert the single-precision floating-point value \p x to an unsigned integer + * in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __float2uint_rz(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to an unsigned integer in round-up mode. + * + * Convert the single-precision floating-point value \p x to an unsigned integer + * in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __float2uint_ru(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to an unsigned integer in round-down mode. + * + * Convert the single-precision floating-point value \p x to an unsigned integer + * in round-down (to negative infinity) mode. + * \return Returns converted value. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __float2uint_rd(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed integer to a float in round-to-nearest-even mode. + * + * Convert the signed integer value \p x to a single-precision floating-point value + * in round-to-nearest-even mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __int2float_rn(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed integer to a float in round-towards-zero mode. + * + * Convert the signed integer value \p x to a single-precision floating-point value + * in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __int2float_rz(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed integer to a float in round-up mode. + * + * Convert the signed integer value \p x to a single-precision floating-point value + * in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __int2float_ru(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a signed integer to a float in round-down mode. + * + * Convert the signed integer value \p x to a single-precision floating-point value + * in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __int2float_rd(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned integer to a float in round-to-nearest-even mode. + * + * Convert the unsigned integer value \p x to a single-precision floating-point value + * in round-to-nearest-even mode. + * \return Returns converted value. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __uint2float_rn(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned integer to a float in round-towards-zero mode. + * + * Convert the unsigned integer value \p x to a single-precision floating-point value + * in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __uint2float_rz(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned integer to a float in round-up mode. + * + * Convert the unsigned integer value \p x to a single-precision floating-point value + * in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __uint2float_ru(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert an unsigned integer to a float in round-down mode. + * + * Convert the unsigned integer value \p x to a single-precision floating-point value + * in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __uint2float_rd(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed 64-bit integer in round-to-nearest-even mode. + * + * Convert the single-precision floating-point value \p x to a signed 64-bit integer + * in round-to-nearest-even mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __float2ll_rn(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed 64-bit integer in round-towards-zero mode. + * + * Convert the single-precision floating-point value \p x to a signed 64-bit integer + * in round-towards-zero mode. + * \return Returns converted value. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __float2ll_rz(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed 64-bit integer in round-up mode. + * + * Convert the single-precision floating-point value \p x to a signed 64-bit integer + * in round-up (to positive infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __float2ll_ru(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to a signed 64-bit integer in round-down mode. + * + * Convert the single-precision floating-point value \p x to a signed 64-bit integer + * in round-down (to negative infinity) mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __float2ll_rd(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to an unsigned 64-bit integer in round-to-nearest-even mode. + * + * Convert the single-precision floating-point value \p x to an unsigned 64-bit integer + * in round-to-nearest-even mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __float2ull_rn(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to an unsigned 64-bit integer in round-towards-zero mode. + * + * Convert the single-precision floating-point value \p x to an unsigned 64-bit integer + * in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __float2ull_rz(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a float to an unsigned 64-bit integer in round-up mode. + * + * Convert the single-precision floating-point value \p x to an unsigned 64-bit integer + * in round-up (to positive infinity) mode. + * \return Returns converted value. 
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __float2ull_ru(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a float to an unsigned 64-bit integer in round-down mode.
+ *
+ * Convert the single-precision floating-point value \p x to an unsigned 64-bit integer
+ * in round-down (to negative infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __float2ull_rd(float x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a signed 64-bit integer to a float in round-to-nearest-even mode.
+ *
+ * Convert the signed 64-bit integer value \p x to a single-precision floating-point value
+ * in round-to-nearest-even mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ll2float_rn(long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a signed 64-bit integer to a float in round-towards-zero mode.
+ *
+ * Convert the signed 64-bit integer value \p x to a single-precision floating-point value
+ * in round-towards-zero mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ll2float_rz(long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a signed 64-bit integer to a float in round-up mode.
+ *
+ * Convert the signed 64-bit integer value \p x to a single-precision floating-point value
+ * in round-up (to positive infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ll2float_ru(long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert a signed 64-bit integer to a float in round-down mode.
+ *
+ * Convert the signed 64-bit integer value \p x to a single-precision floating-point value
+ * in round-down (to negative infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ll2float_rd(long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert an unsigned 64-bit integer to a float in round-to-nearest-even mode.
+ *
+ * Convert the unsigned 64-bit integer value \p x to a single-precision floating-point value
+ * in round-to-nearest-even mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ull2float_rn(unsigned long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert an unsigned 64-bit integer to a float in round-towards-zero mode.
+ *
+ * Convert the unsigned 64-bit integer value \p x to a single-precision floating-point value
+ * in round-towards-zero mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ull2float_rz(unsigned long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert an unsigned 64-bit integer to a float in round-up mode.
+ *
+ * Convert the unsigned 64-bit integer value \p x to a single-precision floating-point value
+ * in round-up (to positive infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ull2float_ru(unsigned long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Convert an unsigned 64-bit integer to a float in round-down mode.
+ *
+ * Convert the unsigned 64-bit integer value \p x to a single-precision floating-point value
+ * in round-down (to negative infinity) mode.
+ * \return Returns converted value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __ull2float_rd(unsigned long long int x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SINGLE
+ * \brief Add two floating-point values in round-to-nearest-even mode.
+ *
+ * Compute the sum of \p x and \p y in round-to-nearest-even rounding mode.
+ *
+ * \return Returns \p x + \p y.
+ * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fadd_rn(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Add two floating-point values in round-towards-zero mode. + * + * Compute the sum of \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fadd_rz(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Add two floating-point values in round-up mode. + * + * Compute the sum of \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fadd_ru(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Add two floating-point values in round-down mode. + * + * Compute the sum of \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x + \p y. + * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fadd_rd(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Subtract two floating-point values in round-to-nearest-even mode. + * + * Compute the difference of \p x and \p y in round-to-nearest-even rounding mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsub_rn(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Subtract two floating-point values in round-towards-zero mode. + * + * Compute the difference of \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x - \p y. 
+ * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsub_rz(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Subtract two floating-point values in round-up mode. + * + * Compute the difference of \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsub_ru(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Subtract two floating-point values in round-down mode. + * + * Compute the difference of \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x - \p y. + * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsub_rd(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Multiply two floating-point values in round-to-nearest-even mode. + * + * Compute the product of \p x and \p y in round-to-nearest-even mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmul_rn(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Multiply two floating-point values in round-towards-zero mode. + * + * Compute the product of \p x and \p y in round-towards-zero mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmul_rz(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Multiply two floating-point values in round-up mode. + * + * Compute the product of \p x and \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x * \p y. 
+ * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmul_ru(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Multiply two floating-point values in round-down mode. + * + * Compute the product of \p x and \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x * \p y. + * + * \note_accuracy_single + * \note_nofma + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmul_rd(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation, in round-to-nearest-even mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-to-nearest-even mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. 
+ * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmaf_rn(float x, float y, float z); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation, in round-towards-zero mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-towards-zero mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. 
+ * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmaf_rz(float x, float y, float z); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation, in round-up mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-up (to positive infinity) mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. 
+ * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmaf_ru(float x, float y, float z); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation, in round-down mode. + * + * Computes the value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single ternary operation, rounding the + * result once in round-down (to negative infinity) mode. + * + * \return Returns the rounded value of + * \latexonly $x \times y + z$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + + * z + * + * + * \endxmlonly + * as a single operation. + * - fmaf( + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf( + * \latexonly $\pm 0$ \endlatexonly + * \xmlonly + * + * + * ± + * 0 + * + * + * \endxmlonly + * , + * \latexonly $\pm \infty$ \endlatexonly + * \xmlonly + * + * + * ± + * + * + * + * \endxmlonly + * , \p z) returns NaN. + * - fmaf(\p x, \p y, + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * \endxmlonly. 
+ * - fmaf(\p x, \p y, + * \latexonly $+\infty$ \endlatexonly + * \xmlonly + * + * + * + + * + * + * + * \endxmlonly + * ) returns NaN if + * \latexonly $x \times y$ \endlatexonly + * \xmlonly + * + * + * x + * × + * y + * + * + * \endxmlonly + * is an exact + * \latexonly $-\infty$ \endlatexonly + * \xmlonly + * + * + * + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fmaf_rd(float x, float y, float z); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-to-nearest-even mode. + * + * Compute the reciprocal of \p x in round-to-nearest-even mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __frcp_rn(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-towards-zero mode. + * + * Compute the reciprocal of \p x in round-towards-zero mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __frcp_rz(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-up mode. + * + * Compute the reciprocal of \p x in round-up (to positive infinity) mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. 
+ * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __frcp_ru(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * + * \endxmlonly + * in round-down mode. + * + * Compute the reciprocal of \p x in round-down (to negative infinity) mode. + * + * \return Returns + * \latexonly $\frac{1}{x}$ \endlatexonly + * \xmlonly + * + * + * + * 1 + * x + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __frcp_rd(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-to-nearest-even mode. + * + * Compute the square root of \p x in round-to-nearest-even mode. + * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsqrt_rn(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-towards-zero mode. + * + * Compute the square root of \p x in round-towards-zero mode. + * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsqrt_rz(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-up mode. + * + * Compute the square root of \p x in round-up (to positive infinity) mode. + * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. 
+ * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsqrt_ru(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * + * \endxmlonly + * in round-down mode. + * + * Compute the square root of \p x in round-down (to negative infinity) mode. + * + * \return Returns + * \latexonly $\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * + * x + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fsqrt_rd(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Compute + * \latexonly $1/\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * 1 + * + * / + * + * + * x + * + * + * + * \endxmlonly + * in round-to-nearest-even mode. + * + * Compute the reciprocal square root of \p x in round-to-nearest-even mode. + * + * \return Returns + * \latexonly $1/\sqrt{x}$ \endlatexonly + * \xmlonly + * + * + * 1 + * + * / + * + * + * x + * + * + * \endxmlonly. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __frsqrt_rn(float x); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Divide two floating-point values in round-to-nearest-even mode. + * + * Divide two floating-point values \p x by \p y in round-to-nearest-even mode. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fdiv_rn(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Divide two floating-point values in round-towards-zero mode. + * + * Divide two floating-point values \p x by \p y in round-towards-zero mode. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fdiv_rz(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Divide two floating-point values in round-up mode. 
+ * + * Divide two floating-point values \p x by \p y in round-up (to positive infinity) mode. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fdiv_ru(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_SINGLE + * \brief Divide two floating-point values in round-down mode. + * + * Divide two floating-point values \p x by \p y in round-down (to negative infinity) mode. + * + * \return Returns \p x / \p y. + * + * \note_accuracy_single + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ float __fdiv_rd(float x, float y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Return the number of consecutive high-order zero bits in a 32-bit integer. + * + * Count the number of consecutive leading zero bits, starting at the most significant bit (bit 31) of \p x. + * + * \return Returns a value between 0 and 32 inclusive representing the number of zero bits. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __clz(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Find the position of the least significant bit set to 1 in a 32-bit integer. + * + * Find the position of the first (least significant) bit set to 1 in \p x, where the least significant + * bit position is 1. + * + * \return Returns a value between 0 and 32 inclusive representing the position of the first bit set. + * - __ffs(0) returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __ffs(int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Count the number of bits that are set to 1 in a 32-bit integer. + * + * Count the number of bits that are set to 1 in \p x. + * + * \return Returns a value between 0 and 32 inclusive representing the number of set bits. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __popc(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Reverse the bit order of a 32-bit unsigned integer. 
+ * + * Reverses the bit order of the 32-bit unsigned integer \p x. + * + * \return Returns the bit-reversed value of \p x. i.e. bit N of the return value corresponds to bit 31-N of \p x. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __brev(unsigned int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Count the number of consecutive high-order zero bits in a 64-bit integer. + * + * Count the number of consecutive leading zero bits, starting at the most significant bit (bit 63) of \p x. + * + * \return Returns a value between 0 and 64 inclusive representing the number of zero bits. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __clzll(long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Find the position of the least significant bit set to 1 in a 64-bit integer. + * + * Find the position of the first (least significant) bit set to 1 in \p x, where the least significant + * bit position is 1. + * + * \return Returns a value between 0 and 64 inclusive representing the position of the first bit set. + * - __ffsll(0) returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __ffsll(long long int x); + + +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Count the number of bits that are set to 1 in a 64-bit integer. + * + * Count the number of bits that are set to 1 in \p x. + * + * \return Returns a value between 0 and 64 inclusive representing the number of set bits. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __popcll(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Reverse the bit order of a 64-bit unsigned integer. + * + * Reverses the bit order of the 64-bit unsigned integer \p x. + * + * \return Returns the bit-reversed value of \p x. i.e. bit N of the return value corresponds to bit 63-N of \p x. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __brevll(unsigned long long int x); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Return selected bytes from two 32-bit unsigned integers. + * + * \return Returns a 32-bit integer consisting of four bytes from eight input bytes provided in the two + * input integers \p x and \p y, as specified by a selector, \p s. + * + * Create 8-byte source + * - uint64_t \p tmp64 = ((uint64_t)\p y << 32) | \p x; + * + * Extract selector bits + * - \p selector0 = (\p s >> 0) & 0x7; + * - \p selector1 = (\p s >> 4) & 0x7; + * - \p selector2 = (\p s >> 8) & 0x7; + * - \p selector3 = (\p s >> 12) & 0x7; + * + * Return 4 selected bytes from 8-byte source: + * - \p res[07:00] = \p tmp64[\p selector0]; + * - \p res[15:08] = \p tmp64[\p selector1]; + * - \p res[23:16] = \p tmp64[\p selector2]; + * - \p res[31:24] = \p tmp64[\p selector3]; + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __byte_perm(unsigned int x, unsigned int y, unsigned int s); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Compute average of signed input arguments, avoiding overflow + * in the intermediate sum. + * + * Compute average of signed input arguments \p x and \p y + * as ( \p x + \p y ) >> 1, avoiding overflow in the intermediate sum. + * + * \return Returns a signed integer value representing the signed + * average value of the two inputs. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __hadd(int x, int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Compute rounded average of signed input arguments, avoiding + * overflow in the intermediate sum. + * + * Compute average of signed input arguments \p x and \p y + * as ( \p x + \p y + 1 ) >> 1, avoiding overflow in the intermediate + * sum. + * + * \return Returns a signed integer value representing the signed + * rounded average value of the two inputs. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __rhadd(int x, int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Compute average of unsigned input arguments, avoiding overflow + * in the intermediate sum. + * + * Compute average of unsigned input arguments \p x and \p y + * as ( \p x + \p y ) >> 1, avoiding overflow in the intermediate sum. + * + * \return Returns an unsigned integer value representing the unsigned + * average value of the two inputs. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __uhadd(unsigned int x, unsigned int y); +/** + * \ingroup CUDA_MATH_INTRINSIC_INT + * \brief Compute rounded average of unsigned input arguments, avoiding + * overflow in the intermediate sum. + * + * Compute average of unsigned input arguments \p x and \p y + * as ( \p x + \p y + 1 ) >> 1, avoiding overflow in the intermediate + * sum. + * + * \return Returns an unsigned integer value representing the unsigned + * rounded average value of the two inputs. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __urhadd(unsigned int x, unsigned int y); + +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed int in round-towards-zero mode. + * + * Convert the double-precision floating-point value \p x to a + * signed integer value in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ int __double2int_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned int in round-towards-zero mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned integer value in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __double2uint_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to a signed 64-bit int in round-towards-zero mode. 
+ * + * Convert the double-precision floating-point value \p x to a + * signed 64-bit integer value in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ long long int __double2ll_rz(double x); +/** + * \ingroup CUDA_MATH_INTRINSIC_CAST + * \brief Convert a double to an unsigned 64-bit int in round-towards-zero mode. + * + * Convert the double-precision floating-point value \p x to an + * unsigned 64-bit integer value in round-towards-zero mode. + * \return Returns converted value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned long long int __double2ull_rz(double x); +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __pm0(void); +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __pm1(void); +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __pm2(void); +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __pm3(void); + +/******************************************************************************* + * * + * FP16 SIMD functions * + * * + *******************************************************************************/ + + // #include "fp16.h" + + +/******************************************************************************* + * * + * SIMD functions * + * * + *******************************************************************************/ + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-halfword absolute value. + * + * Splits 4 bytes of argument into 2 parts, each consisting of 2 bytes, + * then computes absolute value for each of parts. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabs2(unsigned int a); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-halfword absolute value with signed saturation. 
+ * + * Splits 4 bytes of argument into 2 parts, each consisting of 2 bytes, + * then computes absolute value with signed saturation for each of parts. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsss2(unsigned int a); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword (un)signed addition, with wrap-around: a + b + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes, + * then performs unsigned addition on corresponding parts. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vadd2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword addition with signed saturation. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes, + * then performs addition with signed saturation on corresponding parts. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vaddss2 (unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword addition with unsigned saturation. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes, + * then performs addition with unsigned saturation on corresponding parts. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vaddus2 (unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword signed rounded average computation. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes, + * then computes signed rounded average of corresponding parts. 
Partial results are
 + * recombined and returned as unsigned int.
 + * \return Returns computed value.
 + */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vavgs2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned rounded average computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes,
+ * then computes unsigned rounded average of corresponding parts. Partial results are
+ * recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vavgu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned average computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes,
+ * then computes unsigned average of corresponding parts. Partial results are
+ * recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vhaddu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword (un)signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if they are equal, and 0000 otherwise.
+ * For example __vcmpeq2(0x1234aba5, 0x1234aba6) returns 0xffff0000.
+ * \return Returns 0xffff if a = b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpeq2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison: a >= b ? 0xffff : 0.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if 'a' part >= 'b' part, and 0000 otherwise.
+ * For example __vcmpges2(0x1234aba5, 0x1234aba6) returns 0xffff0000.
+ * \return Returns 0xffff if a >= b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpges2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword unsigned comparison: a >= b ? 0xffff : 0. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * For corresponding parts result is ffff if 'a' part >= 'b' part, and 0000 otherwise. + * For example __vcmpgeu2(0x1234aba5, 0x1234aba6) returns 0xffff0000. + * \return Returns 0xffff if a >= b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgeu2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword signed comparison: a > b ? 0xffff : 0. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * For corresponding parts result is ffff if 'a' part > 'b' part, and 0000 otherwise. + * For example __vcmpgts2(0x1234aba5, 0x1234aba6) returns 0x00000000. + * \return Returns 0xffff if a > b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgts2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword unsigned comparison: a > b ? 0xffff : 0. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * For corresponding parts result is ffff if 'a' part > 'b' part, and 0000 otherwise. + * For example __vcmpgtu2(0x1234aba5, 0x1234aba6) returns 0x00000000. + * \return Returns 0xffff if a > b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgtu2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword signed comparison: a <= b ? 0xffff : 0. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. 
+ * For corresponding parts result is ffff if 'a' part <= 'b' part, and 0000 otherwise. + * For example __vcmples2(0x1234aba5, 0x1234aba6) returns 0xffffffff. + * \return Returns 0xffff if a <= b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmples2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword unsigned comparison: a <= b ? 0xffff : 0. + * + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * For corresponding parts result is ffff if 'a' part <= 'b' part, and 0000 otherwise. + * For example __vcmpleu2(0x1234aba5, 0x1234aba6) returns 0xffffffff. + * \return Returns 0xffff if a <= b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpleu2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword signed comparison: a < b ? 0xffff : 0. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * For corresponding parts result is ffff if 'a' part < 'b' part, and 0000 otherwise. + * For example __vcmplts2(0x1234aba5, 0x1234aba6) returns 0x0000ffff. + * \return Returns 0xffff if a < b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmplts2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword unsigned comparison: a < b ? 0xffff : 0. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * For corresponding parts result is ffff if 'a' part < 'b' part, and 0000 otherwise. + * For example __vcmpltu2(0x1234aba5, 0x1234aba6) returns 0x0000ffff. + * \return Returns 0xffff if a < b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpltu2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword (un)signed comparison: a != b ? 
0xffff : 0.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts result is ffff if 'a' part != 'b' part, and 0000 otherwise.
+ * For example __vcmpne2(0x1234aba5, 0x1234aba6) returns 0x0000ffff.
+ * \return Returns 0xffff if a != b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpne2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword absolute difference of unsigned integer computation: |a - b|
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes absolute difference. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsdiffu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed maximum computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes signed maximum. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmaxs2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned maximum computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes unsigned maximum. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmaxu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed minimum computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes signed minimum. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmins2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned minimum computation.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes unsigned minimum. Partial results
+ * are recombined and returned as unsigned int.
+ * \return Returns computed value.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vminu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword (un)signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part == 'b' part.
+ * If both equalities are satisfied, function returns 1.
+ * \return Returns 1 if a = b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vseteq2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part >= 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a >= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetges2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part >= 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a >= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgeu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part > 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a > b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgts2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part > 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a > b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgtu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part <= 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a <= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetles2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part <= 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a <= b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetleu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part < 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a < b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetlts2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword unsigned comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part < 'b' part.
+ * If both inequalities are satisfied, function returns 1.
+ * \return Returns 1 if a < b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetltu2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Performs per-halfword (un)signed comparison.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function performs comparison 'a' part != 'b' part.
+ * If both conditions are satisfied, function returns 1.
+ * \return Returns 1 if a != b, else returns 0.
+ */
+__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetne2(unsigned int a, unsigned int b);
+
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_SIMD
+ * \brief Computes per-halfword sum of abs diff of unsigned.
+ *
+ * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes.
+ * For corresponding parts function computes absolute differences and returns
+ * sum of those differences.
+ * \return Returns computed value.
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsadu2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword (un)signed subtraction, with wrap-around. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * For corresponding parts function performs subtraction. Partial results + * are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsub2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword (un)signed subtraction, with signed saturation. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * For corresponding parts function performs subtraction with signed saturation. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsubss2 (unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword subtraction with unsigned saturation. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * For corresponding parts function performs subtraction with unsigned saturation. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsubus2 (unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-halfword negation. + * + * Splits 4 bytes of argument into 2 parts, each consisting of 2 bytes. + * For each part function computes negation. Partial results are recombined and returned as unsigned int. + * \return Returns computed value. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vneg2(unsigned int a); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-halfword negation with signed saturation. + * + * Splits 4 bytes of argument into 2 parts, each consisting of 2 bytes. + * For each part function computes negation. Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vnegss2(unsigned int a); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-halfword sum of absolute difference of signed integer. + * + * Splits 4 bytes of each into 2 parts, each consisting of 2 bytes. + * For corresponding parts function computes absolute difference. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsdiffs2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword sum of absolute difference of signed. + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * For corresponding parts function computes absolute difference and sum it up. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsads2(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte absolute value. + * + * Splits argument by bytes. Computes absolute value of each byte. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabs4(unsigned int a); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte absolute value with signed saturation. 
+ * + * Splits 4 bytes of argument into 4 parts, each consisting of 1 byte, + * then computes absolute value with signed saturation for each of parts. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsss4(unsigned int a); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte (un)signed addition. + * + * Splits 'a' into 4 bytes, then performs unsigned addition on each of these + * bytes with the corresponding byte from 'b', ignoring overflow. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vadd4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte addition with signed saturation. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte, + * then performs addition with signed saturation on corresponding parts. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vaddss4 (unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte addition with unsigned saturation. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte, + * then performs addition with unsigned saturation on corresponding parts. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vaddus4 (unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte signed rounded average. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * then computes signed rounded average of corresponding parts. Partial results are + * recombined and returned as unsigned int. 
+ * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vavgs4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte unsigned rounded average. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * then computes unsigned rounded average of corresponding parts. Partial results are + * recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vavgu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte unsigned average. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * then computes unsigned average of corresponding parts. Partial results are + * recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vhaddu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte (un)signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts result is ff if they are equal, and 00 otherwise. + * For example __vcmpeq4(0x1234aba5, 0x1234aba6) returns 0xffffff00. + * \return Returns 0xff if a = b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpeq4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts result is ff if 'a' part >= 'b' part, and 00 otherwise. + * For example __vcmpges4(0x1234aba5, 0x1234aba6) returns 0xffffff00. + * \return Returns 0xff if a >= b, else returns 0. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpges4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte unsigned comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts result is ff if 'a' part >= 'b' part, and 00 otherwise. + * For example __vcmpgeu4(0x1234aba5, 0x1234aba6) returns 0xffffff00. + * \return Returns 0xff if a >= b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgeu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts result is ff if 'a' part > 'b' part, and 00 otherwise. + * For example __vcmpgts4(0x1234aba5, 0x1234aba6) returns 0x00000000. + * \return Returns 0xff if a > b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgts4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte unsigned comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts result is ff if 'a' part > 'b' part, and 00 otherwise. + * For example __vcmpgtu4(0x1234aba5, 0x1234aba6) returns 0x00000000. + * \return Returns 0xff if a > b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpgtu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts result is ff if 'a' part <= 'b' part, and 00 otherwise. + * For example __vcmples4(0x1234aba5, 0x1234aba6) returns 0xffffffff. + * \return Returns 0xff if a <= b, else returns 0. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmples4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte unsigned comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts result is ff if 'a' part <= 'b' part, and 00 otherwise. + * For example __vcmpleu4(0x1234aba5, 0x1234aba6) returns 0xffffffff. + * \return Returns 0xff if a <= b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpleu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts result is ff if 'a' part < 'b' part, and 00 otherwise. + * For example __vcmplts4(0x1234aba5, 0x1234aba6) returns 0x000000ff. + * \return Returns 0xff if a < b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmplts4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte unsigned comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts result is ff if 'a' part < 'b' part, and 00 otherwise. + * For example __vcmpltu4(0x1234aba5, 0x1234aba6) returns 0x000000ff. + * \return Returns 0xff if a < b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpltu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte (un)signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts result is ff if 'a' part != 'b' part, and 00 otherwise. + * For example __vcmpne4(0x1234aba5, 0x1234aba6) returns 0x000000ff. + * \return Returns 0xff if a != b, else returns 0. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vcmpne4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte absolute difference of unsigned integer. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function computes absolute difference. Partial results + * are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsdiffu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte signed maximum. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function computes signed maximum. Partial results + * are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmaxs4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte unsigned maximum. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function computes unsigned maximum. Partial results + * are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmaxu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte signed minimum. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function computes signed minimum. Partial results + * are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vmins4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte unsigned minimum. 
+ * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function computes unsigned minimum. Partial results + * are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vminu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte (un)signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function performs comparison 'a' part == 'b' part. + * If both equalities are satisfied, function returns 1. + * \return Returns 1 if a = b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vseteq4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function performs comparison 'a' part <= 'b' part. + * If both inequalities are satisfied, function returns 1. + * \return Returns 1 if a <= b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetles4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte unsigned comparison. + * + * Splits 4 bytes of each argument into 4 part, each consisting of 1 byte. + * For corresponding parts function performs comparison 'a' part <= 'b' part. + * If both inequalities are satisfied, function returns 1. + * \return Returns 1 if a <= b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetleu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. 
+ * For corresponding parts function performs comparison 'a' part < 'b' part. + * If both inequalities are satisfied, function returns 1. + * \return Returns 1 if a < b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetlts4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte unsigned comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function performs comparison 'a' part < 'b' part. + * If both inequalities are satisfied, function returns 1. + * \return Returns 1 if a < b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetltu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function performs comparison 'a' part >= 'b' part. + * If both inequalities are satisfied, function returns 1. + * \return Returns 1 if a >= b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetges4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte unsigned comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function performs comparison 'a' part >= 'b' part. + * If both inequalities are satisfied, function returns 1. + * \return Returns 1 if a >= b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgeu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function performs comparison 'a' part > 'b' part. 
+ * If both inequalities are satisfied, function returns 1. + * \return Returns 1 if a > b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgts4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte unsigned comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function performs comparison 'a' part > 'b' part. + * If both inequalities are satisfied, function returns 1. + * \return Returns 1 if a > b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetgtu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte (un)signed comparison. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function performs comparison 'a' part != 'b' part. + * If both conditions are satisfied, function returns 1. + * \return Returns 1 if a != b, else returns 0. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsetne4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte sum of abs difference of unsigned. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function computes absolute differences and returns + * sum of those differences. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsadu4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte subtraction. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function performs subtraction. Partial results + * are recombined and returned as unsigned int. + * \return Returns computed value. 
+ */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsub4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte subtraction with signed saturation. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function performs subtraction with signed saturation. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsubss4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte subtraction with unsigned saturation. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function performs subtraction with unsigned saturation. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsubus4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte negation. + * + * Splits 4 bytes of argument into 4 parts, each consisting of 1 byte. + * For each part function computes negation. Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vneg4(unsigned int a); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-byte negation with signed saturation. + * + * Splits 4 bytes of argument into 4 parts, each consisting of 1 byte. + * For each part function computes negation. Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vnegss4(unsigned int a); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte absolute difference of signed integer. 
+ * + * Splits 4 bytes of each into 4 parts, each consisting of 1 byte. + * For corresponding parts function computes absolute difference. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vabsdiffs4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes per-byte sum of abs difference of signed. + * + * Splits 4 bytes of each argument into 4 parts, each consisting of 1 byte. + * For corresponding parts function computes absolute difference and sum it up. + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_FUNCTIONS_DECL__ __device_builtin__ unsigned int __vsads4(unsigned int a, unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(max(a, b), 0) + * + * Calculates the maximum of \p a and \p b of two signed ints, if this is less than \p 0 then \p 0 is returned. + * \return Returns computed value. + */ + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax_s32_relu(const int a, const int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(max(a, b), 0) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs a max with relu ( = max(a_part, b_part, 0) ). Partial results + * are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax_s16x2_relu(const unsigned int a, const unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(min(a, b), 0) + * + * Calculates the minimum of \p a and \p b of two signed ints, if this is less than \p 0 then \p 0 is returned. + * \return Returns computed value. 
+ */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin_s32_relu(const int a, const int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(min(a, b), 0) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs a min with relu ( = max(min(a_part, b_part), 0) ). Partial results + * are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin_s16x2_relu(const unsigned int a, const unsigned int b); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(max(a, b), c) + * + * Calculates the 3-way max of signed integers \p a, \p b and \p c. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax3_s32(const int a, const int b, const int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(max(a, b), c) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs a 3-way max ( = max(max(a_part, b_part), c_part) ). + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(max(a, b), c) + * + * Calculates the 3-way max of unsigned integers \p a, \p b and \p c. + * \return Returns computed value. 
+ */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u32(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(max(a, b), c) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as unsigned shorts. + * For corresponding parts function performs a 3-way max ( = max(max(a_part, b_part), c_part) ). + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes min(min(a, b), c) + * + * Calculates the 3-way min of signed integers \p a, \p b and \p c. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32(const int a, const int b, const int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword min(min(a, b), c) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs a 3-way min ( = min(min(a_part, b_part), c_part) ). + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes min(min(a, b), c) + * + * Calculates the 3-way min of unsigned integers \p a, \p b and \p c. + * \return Returns computed value. 
+ */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u32(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword min(min(a, b), c) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as unsigned shorts. + * For corresponding parts function performs a 3-way min ( = min(min(a_part, b_part), c_part) ). + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(max(max(a, b), c), 0) + * + * Calculates the maximum of three signed ints, if this is less than \p 0 then \p 0 is returned. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax3_s32_relu(const int a, const int b, const int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(max(max(a, b), c), 0) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs a three-way max with relu ( = max(a_part, b_part, c_part, 0) ). + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(min(min(a, b), c), 0) + * + * Calculates the minimum of three signed ints, if this is less than \p 0 then \p 0 is returned. + * \return Returns computed value. 
+ */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32_relu(const int a, const int b, const int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(min(min(a, b), c), 0) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs a three-way min with relu ( = max(min(a_part, b_part, c_part), 0) ). + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(a + b, c) + * + * Calculates the sum of signed integers \p a and \p b and takes the max with \p c. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32(const int a, const int b, const int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(a + b, c) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs an add and compare: max(a_part + b_part), c_part) + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(a + b, c) + * + * Calculates the sum of unsigned integers \p a and \p b and takes the max with \p c. + * \return Returns computed value. 
+ */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u32(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(a + b, c) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as unsigned shorts. + * For corresponding parts function performs an add and compare: max(a_part + b_part), c_part) + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u16x2(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes min(a + b, c) + * + * Calculates the sum of signed integers \p a and \p b and takes the min with \p c. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32(const int a, const int b, const int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword min(a + b, c) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs an add and compare: min(a_part + b_part), c_part) + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes min(a + b, c) + * + * Calculates the sum of unsigned integers \p a and \p b and takes the min with \p c. + * \return Returns computed value. 
+ */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u32(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword min(a + b, c) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as unsigned shorts. + * For corresponding parts function performs an add and compare: min(a_part + b_part, c_part) + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u16x2(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(max(a + b, c), 0) + * + * Calculates the sum of signed integers \p a and \p b and takes the max with \p c. + * If the result is less than \p 0 then \p 0 is returned. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32_relu(const int a, const int b, const int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(max(a + b, c), 0) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs an add, followed by a max with relu: max(max(a_part + b_part, c_part), 0) + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(min(a + b, c), 0) + * + * Calculates the sum of signed integers \p a and \p b and takes the min with \p c. + * If the result is less than \p 0 then \p 0 is returned. + * \return Returns computed value. 
+ */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32_relu(const int a, const int b, const int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(min(a + b, c), 0) + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs an add, followed by a min with relu: max(min(a_part + b_part), c_part), 0) + * Partial results are recombined and returned as unsigned int. + * \return Returns computed value. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(a, b), also sets the value pointed to by pred to (a >= b). + * + * Calculates the maximum of \p a and \p b of two signed ints. Also sets the value pointed to by \p pred to the value (a >= b). + * \return Returns computed values. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmax_s32(const int a, const int b, bool* const pred); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes max(a, b), also sets the value pointed to by pred to (a >= b). + * + * Calculates the maximum of \p a and \p b of two unsigned ints. Also sets the value pointed to by \p pred to the value (a >= b). + * \return Returns computed values. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u32(const unsigned int a, const unsigned int b, bool* const pred); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes min(a, b), also sets the value pointed to by pred to (a <= b). + * + * Calculates the minimum of \p a and \p b of two signed ints. Also sets the value pointed to by \p pred to the value (a <= b). + * \return Returns computed values. 
+ */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmin_s32(const int a, const int b, bool* const pred); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Computes min(a, b), also sets the value pointed to by pred to (a <= b). + * + * Calculates the minimum of \p a and \p b of two unsigned ints. Also sets the value pointed to by \p pred to the value (a <= b). + * \return Returns computed values. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u32(const unsigned int a, const unsigned int b, bool* const pred); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(a, b), also sets the value pointed to by pred_hi and pred_lo to the per-halfword result of (a >= b). + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs a maximum ( = max(a_part, b_part) ). + * Partial results are recombined and returned as unsigned int. + * Sets the value pointed to by \p pred_hi to the value (a_high_part >= b_high_part). + * Sets the value pointed to by \p pred_lo to the value (a_low_part >= b_low_part). + * \return Returns computed values. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword max(a, b), also sets the value pointed to by pred_hi and pred_lo to the per-halfword result of (a >= b). + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as unsigned shorts. + * For corresponding parts function performs a maximum ( = max(a_part, b_part) ). + * Partial results are recombined and returned as unsigned int. + * Sets the value pointed to by \p pred_hi to the value (a_high_part >= b_high_part). 
+ * Sets the value pointed to by \p pred_lo to the value (a_low_part >= b_low_part). + * \return Returns computed values. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword min(a, b), also sets the value pointed to by pred_hi and pred_lo to the per-halfword result of (a <= b). + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as signed shorts. + * For corresponding parts function performs a minimum ( = min(a_part, b_part) ). + * Partial results are recombined and returned as unsigned int. + * Sets the value pointed to by \p pred_hi to the value (a_high_part <= b_high_part). + * Sets the value pointed to by \p pred_lo to the value (a_low_part <= b_low_part). + * \return Returns computed values. + */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo); + +/** + * \ingroup CUDA_MATH_INTRINSIC_SIMD + * \brief Performs per-halfword min(a, b), also sets the value pointed to by pred_hi and pred_lo to the per-halfword result of (a <= b). + * + * Splits 4 bytes of each argument into 2 parts, each consisting of 2 bytes. + * These 2 byte parts are interpreted as unsigned shorts. + * For corresponding parts function performs a minimum ( = min(a_part, b_part) ). + * Partial results are recombined and returned as unsigned int. + * Sets the value pointed to by \p pred_hi to the value (a_high_part <= b_high_part). + * Sets the value pointed to by \p pred_lo to the value (a_low_part <= b_low_part). + * \return Returns computed values. 
+ */ +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo); + +/******************************************************************************* + * * + * END SIMD functions * + * * + *******************************************************************************/ +} //extern "c" +#undef EXCLUDE_FROM_RTC + +#undef __DEVICE_FUNCTIONS_DECL__ +#undef __DEVICE_FUNCTIONS_STATIC_DECL__ +#undef __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ + +#endif /* __cplusplus && __CUDACC__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if !defined(__CUDACC_RTC__) +#include "device_functions.hpp" +#endif /* !defined(__CUDACC_RTC__) */ + +#include "device_atomic_functions.h" +#include "device_double_functions.h" +#include "sm_20_atomic_functions.h" +#include "sm_32_atomic_functions.h" +#include "sm_35_atomic_functions.h" +#include "sm_60_atomic_functions.h" +#include "sm_20_intrinsics.h" +#include "sm_30_intrinsics.h" +#include "sm_32_intrinsics.h" +#include "sm_35_intrinsics.h" +#include "sm_61_intrinsics.h" +#include "sm_70_rt.h" +#include "sm_80_rt.h" +#include "sm_90_rt.h" +#include "texture_indirect_functions.h" +#include "surface_indirect_functions.h" +#include "cudacc_ext.h" + +#ifdef __CUDACC__ +extern "C" __host__ __device__ unsigned CUDARTAPI __cudaPushCallConfiguration(dim3 gridDim, + dim3 blockDim, + size_t sharedMem = 0, + struct CUstream_st *stream = 0); +#endif /* __CUDACC__ */ + +#endif /* !__DEVICE_FUNCTIONS_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H__ +#endif + diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_functions.hpp b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..88aa76f3cab6c57de39827d88435817171966989 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_functions.hpp @@ -0,0 +1,1197 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/device_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/device_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__ +#endif + +#if !defined(__DEVICE_FUNCTIONS_HPP__) +#define __DEVICE_FUNCTIONS_HPP__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(__CUDACC_RTC__) +#define __DEVICE_FUNCTIONS_DECL__ __device__ +#define __DEVICE_FUNCTIONS_STATIC_DECL__ __device__ +#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ __device__ __host__ __cudart_builtin__ +#else +#define __DEVICE_FUNCTIONS_DECL__ __device__ +#define __DEVICE_FUNCTIONS_STATIC_DECL__ static __inline__ __device__ +#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ static __inline__ __device__ __host__ __cudart_builtin__ +#endif /* __CUDACC_RTC__ */ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +#undef __DEVICE_FUNCTIONS_DECL__ +#undef __DEVICE_FUNCTIONS_STATIC_DECL__ + +#endif /* __cplusplus && __CUDACC__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#ifdef __CUDACC__ +# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) +#define __CUDA_AND_AT_LEAST_SM_90__ +#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) */ +# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) +#define __CUDA_AND_AT_LEAST_SM_70__ +#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) */ +# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) +#define __CUDA_AND_AT_LEAST_SM_75__ +#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) */ +#endif /* __CUDACC__ */ + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax_s32_relu(const int a, const int b){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm("{max.s32.relu %0, %1, %2;}" : 
"=r"(res) : "r"(a), "r"(b)); + return res; +#else + // Host and older architecture code + int ans = max(a, b); + + return (ans > 0) ? ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax_s16x2_relu(const unsigned int a, const unsigned int b){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm("{max.s16x2.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b)); +#elif defined(__CUDA_ARCH__) + res = __vmaxs2(__vmaxs2(a, b), 0U); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + // Get answer + short ansS_lo = (short)max(aS_lo, bS_lo); + short ansS_hi = (short)max(aS_hi, bS_hi); + + // relu + if(ansS_lo < 0){ ansS_lo = 0; } + if(ansS_hi < 0){ ansS_hi = 0; } + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin_s32_relu(const int a, const int b){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm("{min.s32.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b)); + return res; +#else + // Host and older architecture code + int ans = min(a, b); + + return (ans > 0) ? 
ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin_s16x2_relu(const unsigned int a, const unsigned int b){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm("{min.s16x2.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b)); +#elif defined(__CUDA_ARCH__) + res = __vmaxs2(__vmins2(a, b), 0U); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + // Get answer + short ansS_lo = (short)min(aS_lo, bS_lo); + short ansS_hi = (short)min(aS_hi, bS_hi); + + // relu + if(ansS_lo < 0){ ansS_lo = 0; } + if(ansS_hi < 0){ ansS_hi = 0; } + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax3_s32(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "max.s32 t1, %1, %2; \n\t" + "max.s32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return max(max(a, b), c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + // Future asm code (naming/syntax may change): + asm ("{.reg .b32 t1; \n\t" + "max.s16x2 t1, %1, %2; \n\t" + "max.s16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif 
defined(__CUDA_AND_AT_LEAST_SM_70__) + res = __vmaxs2(__vmaxs2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)max(max(aS_lo, bS_lo), cS_lo); + short ansS_hi = (short)max(max(aS_hi, bS_hi), cS_hi); + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u32(const unsigned int a, const unsigned int b, const unsigned int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ +int res; + asm ("{.reg .u32 t1; \n\t" + "max.u32 t1, %1, %2; \n\t" + "max.u32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return max(max(a, b), c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "max.u16x2 t1, %1, %2; \n\t" + "max.u16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vmaxu2(__vmaxu2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + 
unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + // Get answer + unsigned short ansU_lo = (unsigned short)max(max(aU_lo, bU_lo), cU_lo); + unsigned short ansU_hi = (unsigned short)max(max(aU_hi, bU_hi), cU_hi); + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "min.s32 t1, %1, %2; \n\t" + "min.s32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return min(min(a, b), c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "min.s16x2 t1, %1, %2; \n\t" + "min.s16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_AND_AT_LEAST_SM_70__) + res = __vmins2(__vmins2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& 
cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)min(min(aS_lo, bS_lo), cS_lo); + short ansS_hi = (short)min(min(aS_hi, bS_hi), cS_hi); + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u32(const unsigned int a, const unsigned int b, const unsigned int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .u32 t1; \n\t" + "min.u32 t1, %1, %2; \n\t" + "min.u32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return min(min(a, b), c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "min.u16x2 t1, %1, %2; \n\t" + "min.u16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vminu2(__vminu2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + // Get answer + unsigned short ansU_lo = (unsigned short)min(min(aU_lo, bU_lo), cU_lo); + unsigned short ansU_hi = (unsigned short)min(min(aU_hi, bU_hi), cU_hi); + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int 
__vimax3_s32_relu(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "max.s32.relu t1, %1, %2; \n\t" + "max.s32.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + int ans = max(max(a, b), c); + + return (ans > 0) ? ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "max.s16x2.relu t1, %1, %2; \n\t" + "max.s16x2.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_AND_AT_LEAST_SM_75__) + res = __vimax_s16x2_relu(__vmaxs2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)max(max(aS_lo, bS_lo), cS_lo); + short ansS_hi = (short)max(max(aS_hi, bS_hi), cS_hi); + + // relu + if(ansS_lo < 0){ansS_lo = 0;} + if(ansS_hi < 0){ansS_hi = 0;} + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32_relu(const int a, 
const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "min.s32.relu t1, %1, %2; \n\t" + "min.s32.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + int ans = min(min(a, b), c); + + return (ans > 0) ? ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "min.s16x2.relu t1, %1, %2; \n\t" + "min.s16x2.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_AND_AT_LEAST_SM_75__) + res = __vimin_s16x2_relu(__vmins2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)min(min(aS_lo, bS_lo), cS_lo); + short ansS_hi = (short)min(min(aS_hi, bS_hi), cS_hi); + + // relu + if(ansS_lo < 0){ansS_lo = 0;} + if(ansS_hi < 0){ansS_hi = 0;} + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32(const int a, const int b, const int c){ +#ifdef 
__CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "add.s32 t1, %1, %2; \n\t" + "max.s32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return max(a + b, c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.s16x2 t1, %1, %2; \n\t" + "max.s16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vmaxs2(__vadd2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)max((short)(aS_lo + bS_lo), cS_lo); + short ansS_hi = (short)max((short)(aS_hi + bS_hi), cS_hi); + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u32(const unsigned int a, const unsigned int b, const unsigned int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int res; + asm ("{.reg .u32 t1; \n\t" + "add.u32 t1, %1, %2; \n\t" + "max.u32 %0, t1, %3;}\n\t" + : "=r"(res) : 
"r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return max(a + b, c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.u16x2 t1, %1, %2; \n\t" + "max.u16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vmaxu2(__vadd2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + // Get answer + unsigned short ansU_lo = (unsigned short)max((unsigned short)(aU_lo + bU_lo), cU_lo); + unsigned short ansU_hi = (unsigned short)max((unsigned short)(aU_hi + bU_hi), cU_hi); + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "add.s32 t1, %1, %2; \n\t" + "min.s32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return min(a + b, c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.s16x2 t1, %1, %2; \n\t" + "min.s16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vmins2(__vadd2(a, b), 
c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)min((short)(aS_lo + bS_lo), cS_lo); + short ansS_hi = (short)min((short)(aS_hi + bS_hi), cS_hi); + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u32(const unsigned int a, const unsigned int b, const unsigned int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int res; + asm ("{.reg .u32 t1; \n\t" + "add.u32 t1, %1, %2; \n\t" + "min.u32 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + return min(a + b, c); +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.u16x2 t1, %1, %2; \n\t" + "min.u16x2 %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vminu2(__vadd2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + 
unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + // Get answer + unsigned short ansU_lo = (unsigned short)min((unsigned short)(aU_lo + bU_lo), cU_lo); + unsigned short ansU_hi = (unsigned short)min((unsigned short)(aU_hi + bU_hi), cU_hi); + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32_relu(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "add.s32 t1, %1, %2; \n\t" + "max.s32.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + int ans = max(a + b, c); + + return (ans > 0) ? ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.s16x2 t1, %1, %2; \n\t" + "max.s16x2.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vimax_s16x2_relu(__vadd2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short 
cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& cU_hi; + + // Get answer + short ansS_lo = (short)max((short)(aS_lo + bS_lo), cS_lo); + short ansS_hi = (short)max((short)(aS_hi + bS_hi), cS_hi); + + if(ansS_lo < 0){ansS_lo = 0;} + if(ansS_hi < 0){ansS_hi = 0;} + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32_relu(const int a, const int b, const int c){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int res; + asm ("{.reg .s32 t1; \n\t" + "add.s32 t1, %1, %2; \n\t" + "min.s32.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); + return res; +#else + // Host and older architecture code + int ans = min(a + b, c); + + return (ans > 0) ? ans : 0; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){ + unsigned int res; +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + asm ("{.reg .b32 t1; \n\t" + "add.s16x2 t1, %1, %2; \n\t" + "min.s16x2.relu %0, t1, %3;}\n\t" + : "=r"(res) : "r"(a), "r"(b), "r"(c)); +#elif defined(__CUDA_ARCH__) + res = __vimin_s16x2_relu(__vadd2(a, b), c); +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + unsigned short cU_lo = (unsigned short)(c & 0xFFFFU); + unsigned short cU_hi = (unsigned short)(c >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + short cS_lo = *(short*)& cU_lo; + short cS_hi = *(short*)& 
cU_hi; + + // Get answer + short ansS_lo = (short)min((short)(aS_lo + bS_lo), cS_lo); + short ansS_hi = (short)min((short)(aS_hi + bS_hi), cS_hi); + + if(ansS_lo < 0){ansS_lo = 0;} + if(ansS_hi < 0){ansS_hi = 0;} + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); +#endif + + return res; +} + +// vimax vimin with predicate +// *pred gets set to '(a >= b)' +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmax_s32(const int a, const int b, bool* const pred){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int val; + unsigned int predicate_local; + asm ("{ .reg .pred __$temp1;\n\t" + " setp.ge.s32 __$temp1, %2, %3;\n\t" + " selp.s32 %0, %2, %3, __$temp1;\n\t" + " selp.s32 %1, 1, 0, __$temp1;}\n\t" + : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b)); + + *pred = (bool)predicate_local; + return val; +#else + // Host and older architecture code + int ans = max(a, b); + + *pred = (a >= b); + return ans; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u32(const unsigned int a, const unsigned int b, bool* const pred){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local; + asm ("{ .reg .pred __$temp1;\n\t" + " setp.ge.u32 __$temp1, %2, %3;\n\t" + " selp.u32 %0, %2, %3, __$temp1;\n\t" + " selp.u32 %1, 1, 0, __$temp1;}\n\t" + : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b)); + + *pred = (bool)predicate_local; + return val; +#else + // Host and older architecture code + unsigned int ans = max(a, b); + + *pred = (a >= b); + return ans; +#endif +} + +// *pred gets set to '(a <= b)' +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmin_s32(const int a, const int b, bool* const pred){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + int val; + unsigned int predicate_local; + asm ("{ .reg .pred __$temp1;\n\t" + " setp.le.s32 __$temp1, %2, %3;\n\t" + " 
selp.s32 %0, %2, %3, __$temp1;\n\t" + " selp.s32 %1, 1, 0, __$temp1;}\n\t" + : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b)); + + *pred = (bool)predicate_local; + return val; +#else + // Host and older architecture code + int ans = min(a, b); + + *pred = (a <= b); + return ans; +#endif +} + +// *pred gets set to '(a <= b)' +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u32(const unsigned int a, const unsigned int b, bool* const pred){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local; + asm ("{ .reg .pred __$temp1;\n\t" + " setp.le.u32 __$temp1, %2, %3;\n\t" + " selp.u32 %0, %2, %3, __$temp1;\n\t" + " selp.u32 %1, 1, 0, __$temp1;}\n\t" + : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b)); + + *pred = (bool)predicate_local; + return val; +#else + // Host and older architecture code + unsigned int ans = min(a, b); + + *pred = (a <= b); + return ans; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local_hi; + unsigned int predicate_local_lo; + asm ("{.reg .pred pu, pv; \n\t" + ".reg .s16 rs0, rs1, rs2, rs3; \n\t" + "max.s16x2 %0, %3, %4; \n\t" + "mov.b32 {rs0, rs1}, %0; \n\t" + "mov.b32 {rs2, rs3}, %3; \n\t" + "setp.eq.s16 pv, rs0, rs2; \n\t" + "setp.eq.s16 pu, rs1, rs3; \n\t" + "selp.b32 %1, 1, 0, pu; \n\t" + "selp.b32 %2, 1, 0, pv;} \n\t" + : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b)); + + *pred_hi = (bool)predicate_local_hi; + *pred_lo = (bool)predicate_local_lo; + return val; +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + 
//cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + // Get answer + short ansS_lo = (short)max(aS_lo, bS_lo); + short ansS_hi = (short)max(aS_hi, bS_hi); + + *pred_hi = (aS_hi >= bS_hi); + *pred_lo = (aS_lo >= bS_lo); + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); + + return ans; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local_hi; + unsigned int predicate_local_lo; + asm ("{.reg .pred pu, pv; \n\t" + ".reg .u16 rs0, rs1, rs2, rs3; \n\t" + "max.u16x2 %0, %3, %4; \n\t" + "mov.b32 {rs0, rs1}, %0; \n\t" + "mov.b32 {rs2, rs3}, %3; \n\t" + "setp.eq.u16 pv, rs0, rs2; \n\t" + "setp.eq.u16 pu, rs1, rs3; \n\t" + "selp.b32 %1, 1, 0, pu; \n\t" + "selp.b32 %2, 1, 0, pv;} \n\t" + : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b)); + + *pred_hi = (bool)predicate_local_hi; + *pred_lo = (bool)predicate_local_lo; + return val; +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + // Get answer + unsigned short ansU_lo = (unsigned short)max(aU_lo, bU_lo); + unsigned short ansU_hi = (unsigned short)max(aU_hi, bU_hi); + + *pred_hi = (aU_hi >= bU_hi); + *pred_lo = (aU_lo >= bU_lo); + + // Put answer back together: + unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); + + 
return ans; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local_hi; + unsigned int predicate_local_lo; + asm ("{.reg .pred pu, pv; \n\t" + ".reg .u16 rs0, rs1, rs2, rs3; \n\t" + "min.s16x2 %0, %3, %4; \n\t" + "mov.b32 {rs0, rs1}, %0; \n\t" + "mov.b32 {rs2, rs3}, %3; \n\t" + "setp.eq.s16 pv, rs0, rs2; \n\t" + "setp.eq.s16 pu, rs1, rs3; \n\t" + "selp.b32 %1, 1, 0, pu; \n\t" + "selp.b32 %2, 1, 0, pv;} \n\t" + : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b)); + + *pred_hi = (bool)predicate_local_hi; + *pred_lo = (bool)predicate_local_lo; + return val; +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + //cast to signed: + short aS_lo = *(short*)& aU_lo; + short aS_hi = *(short*)& aU_hi; + + short bS_lo = *(short*)& bU_lo; + short bS_hi = *(short*)& bU_hi; + + // Get answer + short ansS_lo = (short)min(aS_lo, bS_lo); + short ansS_hi = (short)min(aS_hi, bS_hi); + + *pred_hi = (aS_hi <= bS_hi); + *pred_lo = (aS_lo <= bS_lo); + + // Cast back to unsigned: + unsigned short ansU_lo = *(unsigned short*)& ansS_lo; + unsigned short ansU_hi = *(unsigned short*)& ansS_hi; + + // Put answer back together: + unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); + + return ans; +#endif +} + +__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){ +#ifdef __CUDA_AND_AT_LEAST_SM_90__ + unsigned int val; + unsigned int predicate_local_hi; + unsigned int predicate_local_lo; + asm ("{.reg 
.pred pu, pv; \n\t" + ".reg .u16 rs0, rs1, rs2, rs3; \n\t" + "min.u16x2 %0, %3, %4; \n\t" + "mov.b32 {rs0, rs1}, %0; \n\t" + "mov.b32 {rs2, rs3}, %3; \n\t" + "setp.eq.u16 pv, rs0, rs2; \n\t" + "setp.eq.u16 pu, rs1, rs3; \n\t" + "selp.b32 %1, 1, 0, pu; \n\t" + "selp.b32 %2, 1, 0, pv;} \n\t" + : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b)); + + *pred_hi = (bool)predicate_local_hi; + *pred_lo = (bool)predicate_local_lo; + return val; +#else + // Host and older architecture code + // Separate our high and low bit: + unsigned short aU_lo = (unsigned short)(a & 0xFFFFU); + unsigned short aU_hi = (unsigned short)(a >> 16); + + unsigned short bU_lo = (unsigned short)(b & 0xFFFFU); + unsigned short bU_hi = (unsigned short)(b >> 16); + + // Get answer + unsigned short ansU_lo = (unsigned short)min(aU_lo, bU_lo); + unsigned short ansU_hi = (unsigned short)min(aU_hi, bU_hi); + + *pred_hi = (aU_hi <= bU_hi); + *pred_lo = (aU_lo <= bU_lo); + + // Put answer back together: + unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16); + + return ans; +#endif +} + +#ifdef __CUDA_AND_AT_LEAST_SM_90__ +#undef __CUDA_AND_AT_LEAST_SM_90__ +#endif + +#undef __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#endif /* !__DEVICE_FUNCTIONS_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/func_macro.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/func_macro.h new file mode 100644 index 
0000000000000000000000000000000000000000..633554a01aaabd1bca5ae278c276710f323d5d7b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/func_macro.h @@ -0,0 +1,57 @@ +/* + * NVIDIA_COPYRIGHT_BEGIN + * + * Copyright (c) 2008-2018, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + * + * NVIDIA_COPYRIGHT_END + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/func_macro.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/func_macro.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__ +#endif + +#if !defined(__FUNC_MACRO_H__) +#define __FUNC_MACRO_H__ + +#if !defined(__CUDA_INTERNAL_COMPILATION__) + +#error -- incorrect inclusion of a cudart header file + +#endif /* !__CUDA_INTERNAL_COMPILATION__ */ + +#if defined(__GNUC__) + +#define __func__(decl) \ + inline decl + +#define __device_func__(decl) \ + static __attribute__((__unused__)) decl + +#elif defined(_WIN32) + +#define __func__(decl) \ + static inline decl + +#define __device_func__(decl) \ + static decl + +#endif /* __GNUC__ */ + +#endif /* __FUNC_MACRO_H__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/mma.hpp b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/mma.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3e10f2a982bd2dcf9814a2fc05a3f200d5a1cb07 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/mma.hpp @@ -0,0 +1,1128 @@ +/* + * Copyright 2017-2020 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/mma.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/mma.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__ +#endif + +#if !defined(__CUDA_MMA_HPP__) +#define __CUDA_MMA_HPP__ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700 + +#include +#include + +#define __CUDA_MMA_DEVICE_DECL__ static __device__ __inline__ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720 +#define __CUDA_IMMA__ 1 +#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720 */ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730 +#define __CUDA_SUBBYTE_IMMA__ 1 +#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730 */ + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 +#define __CUDA_AMPERE_MMA__ 1 +#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 */ + +namespace nvcuda { +namespace wmma { + + // + // Load functions for frags of shape m16n16k16 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void 
load_matrix_sync(fragment& a, const __half* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m16n16k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0); + else + __hmma_m16n16k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m16n16k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0); + else + __hmma_m16n16k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m16n16k16_ld_a_s8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m16n16k16_ld_a_s8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m16n16k16_ld_a_u8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m16n16k16_ld_a_u8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m16n16k16_ld_b_s8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m16n16k16_ld_b_s8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m16n16k16_ld_b_u8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m16n16k16_ld_b_u8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, 
unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m16n16k16_ld_c((int *)&a, (const int*)p, ldm, 0); + else + __imma_m16n16k16_ld_c((int *)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + + // + // Load functions for frags of shape m32n8k16 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m32n8k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0); + else + __hmma_m32n8k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, 
const float* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m32n8k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0); + else + __hmma_m32n8k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m32n8k16_ld_a_s8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m32n8k16_ld_a_s8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m32n8k16_ld_a_u8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m32n8k16_ld_a_u8((int *)&a, (const int *)p, ldm, 1); + } + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m32n8k16_ld_b_s8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m32n8k16_ld_b_s8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m32n8k16_ld_b_u8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m32n8k16_ld_b_u8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m32n8k16_ld_c((int *)&a, (const int*)p, ldm, 0); + else + __imma_m32n8k16_ld_c((int *)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const 
__nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + + // + // Load functions for frags of shape m8n32k16 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm) { + __hmma_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __half* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m8n32k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0); + else + __hmma_m8n32k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m8n32k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0); + else + __hmma_m8n32k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* 
p, unsigned ldm) { + __imma_m8n32k16_ld_a_s8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m8n32k16_ld_a_s8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m8n32k16_ld_a_u8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m8n32k16_ld_a_u8((int *)&a, (const int *)p, ldm, 1); + } + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m8n32k16_ld_b_s8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const signed char* p, unsigned ldm) { + __imma_m8n32k16_ld_b_s8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m8n32k16_ld_b_u8((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const unsigned char* p, unsigned ldm) { + __imma_m8n32k16_ld_b_u8((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m8n32k16_ld_c((int *)&a, (const int*)p, ldm, 0); + else + __imma_m8n32k16_ld_c((int *)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const 
__nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const __nv_bfloat16* p, unsigned ldm) { + __mma_bf16_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // Load functions for frags of shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __imma_m8n8k32_ld_a_s4((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __imma_m8n8k32_ld_a_u4((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __imma_m8n8k32_ld_b_s4((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __imma_m8n8k32_ld_b_u4((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m8n8k32_ld_c((int *)&a, (const int*)p, ldm, 0); + else + __imma_m8n8k32_ld_c((int *)&a, (const int*)p, ldm, 1); + } + + // + // Load functions for frags of shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __bmma_m8n8k128_ld_a_b1((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const void* p, unsigned ldm) { + __bmma_m8n8k128_ld_b_b1((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const int* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __bmma_m8n8k128_ld_c((int *)&a, (const int*)p, ldm, 0); + else + __bmma_m8n8k128_ld_c((int *)&a, (const int*)p, ldm, 1); + } +#endif /* __CUDA_SUBBYTE_IMMA__ */ + + + +#ifdef 
__CUDA_AMPERE_MMA__ + // load functions for frags of shape m16n16k8 + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) { + __mma_tf32_m16n16k8_ld_a((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) { + __mma_tf32_m16n16k8_ld_a((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) { + __mma_tf32_m16n16k8_ld_b((int *)&a, (const int *)p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm) { + __mma_tf32_m16n16k8_ld_b((int *)&a, (const int *)p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const float* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __mma_tf32_m16n16k8_ld_c((float *)&a, p, ldm, 0); + else + __mma_tf32_m16n16k8_ld_c((float *)&a, p, ldm, 1); + } + + // load functions for frags of shape m8n8k4 + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) { + __dmma_m8n8k4_ld_a((double *)&a, p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) { + __dmma_m8n8k4_ld_a((double *)&a, p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) { + __dmma_m8n8k4_ld_b((double *)&a, p, ldm, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm) { + __dmma_m8n8k4_ld_b((double *)&a, p, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment& a, const double* p, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __dmma_m8n8k4_ld_c((double *)&a, p, ldm, 0); + else + __dmma_m8n8k4_ld_c((double *)&a, p, ldm, 1); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // Store functions for frags of shape m16n16k16 + // + __CUDA_MMA_DEVICE_DECL__ void 
store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m16n16k16_st_c_f16((int*)p, (int*)&a, ldm, 0); + else + __hmma_m16n16k16_st_c_f16((int*)p, (int*)&a, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m16n16k16_st_c_f32((float*)p, (float*)&a, ldm, 0); + else + __hmma_m16n16k16_st_c_f32((float*)p, (float*)&a, ldm, 1); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m16n16k16_st_c_i32(p, (const int*)&a, ldm, 0); + else + __imma_m16n16k16_st_c_i32(p, (const int*)&a, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + + // + // Store functions for frags of shape m32n8k16 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m32n8k16_st_c_f16((int*)p, (int*)&a, ldm, 0); + else + __hmma_m32n8k16_st_c_f16((int*)p, (int*)&a, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m32n8k16_st_c_f32((float*)p, (float*)&a, ldm, 0); + else + __hmma_m32n8k16_st_c_f32((float*)p, (float*)&a, ldm, 1); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m32n8k16_st_c_i32(p, (const int*)&a, ldm, 0); + else + __imma_m32n8k16_st_c_i32(p, (const int*)&a, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + + // + // Store functions for frags of shape m8n32k16 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m8n32k16_st_c_f16((int*)p, (int*)&a, ldm, 
0); + else + __hmma_m8n32k16_st_c_f16((int*)p, (int*)&a, ldm, 1); + } + + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __hmma_m8n32k16_st_c_f32((float*)p, (float*)&a, ldm, 0); + else + __hmma_m8n32k16_st_c_f32((float*)p, (float*)&a, ldm, 1); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m8n32k16_st_c_i32(p, (const int*)&a, ldm, 0); + else + __imma_m8n32k16_st_c_i32(p, (const int*)&a, ldm, 1); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // Store functions for frags of shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __imma_m8n8k32_st_c_i32(p, (const int*)&a, ldm, 0); + else + __imma_m8n8k32_st_c_i32(p, (const int*)&a, ldm, 1); + } + + // + // Store functions for frags of shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __bmma_m8n8k128_st_c_i32(p, (const int*)&a, ldm, 0); + else + __bmma_m8n8k128_st_c_i32(p, (const int*)&a, ldm, 1); + } +#endif /* __CUDA_SUBBYTE_IMMA__ */ + + +#ifdef __CUDA_AMPERE_MMA__ + + // + // Store functions for frags of shape m16n16k8 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __mma_m16n16k8_st_c_f32(p, (const float*)&a, ldm, 0); + else + __mma_m16n16k8_st_c_f32(p, (const float*)&a, ldm, 1); + } + + + // + // Store functions for frags of shape m8n8k4 + // + __CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(double *p, const fragment& a, unsigned ldm, layout_t layout) { + if (layout == mem_row_major) + __dmma_m8n8k4_st_c_f64(p, (const double*)&a, ldm, 0); + else + 
__dmma_m8n8k4_st_c_f64(p, (const double*)&a, ldm, 1); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // MMA functions for shape m16n16k16 + // + // D fp16, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, 
const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + + // D fp16, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 1); + else + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, 
(const int *)&b, (const int *)&c, 1, 0); + } + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 1); + else + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 1); + else + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 1); + else + __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 1); + else + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 1); + else + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 1); + else + 
__imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 1); + else + __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 0); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + + // + // MMA functions for shape m32n8k16 + // + // D fp16, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const 
fragment& c) { + __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + 
__hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + + // D fp16, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1); + else + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1); + else + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0); + 
} + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1); + else + __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1); + else + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0); + + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1); + else + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0); + + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1); + else + __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0); + + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& 
d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + // + // MMA functions for shape m8n32k16 + // + // D fp16, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp16 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 
0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0); + } + + // D fp32, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + + // D fp16, C fp32 + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + 
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + +#ifdef __CUDA_IMMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1); + else + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1); + else + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1); + else + __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const 
fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1); + else + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1); + else + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1); + else + __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0); + } +#endif /* __CUDA_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } +#endif /* __CUDA_AMPERE_MMA__ */ + + +#ifdef __CUDA_SUBBYTE_IMMA__ + // + // MMA functions for shape m8n8k32 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, 
const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n8k32_mma_s4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m8n8k32_mma_s4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, bool satf) { + if (satf) + __imma_m8n8k32_mma_u4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1); + else + __imma_m8n8k32_mma_u4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0); + } + + // + // MMA functions for shape m8n8k128 + // + __CUDA_MMA_DEVICE_DECL__ void bmma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c, + experimental::bmmaBitOp op, experimental::bmmaAccumulateOp) + { + +#ifdef __CUDA_AMPERE_MMA__ + if (op == experimental::bmmaBitOpAND) + __bmma_m8n8k128_mma_and_popc_b1((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1); + else +#endif /* __CUDA_AMPERE_MMA__ */ + __bmma_m8n8k128_mma_xor_popc_b1((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1); + } + + +#endif /* __CUDA_SUBBYTE_IMMA__ */ + +#ifdef __CUDA_AMPERE_MMA__ + // + // MMA functions for shape m16n16k8 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const 
fragment& b, const fragment& c) { + __mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0); + } + + + // + // MMA functions for shape m8n8k4 + // + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 1, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 3, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 0, 0); + } + + __CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment& d, const fragment& a, const fragment& b, const fragment& c) { + __dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 2, 0); + } + +#endif /* __CUDA_AMPERE_MMA__ */ + +}; +}; + +#undef __CUDA_IMMA__ +#undef __CUDA_SUBBYTE_IMMA__ +#undef __CUDA_MMA_DEVICE_DECL__ +#undef __CUDA_AMPERE_MMA__ + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */ + +#endif /* __cplusplus && __CUDACC__ */ + + +#endif /* __CUDA_MMA_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/nvfunctional b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/nvfunctional new file mode 100644 index 0000000000000000000000000000000000000000..5cb9ffeb9cb9f1d202cb1f5cb1d4d7e88a416475 --- /dev/null +++ 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/nvfunctional @@ -0,0 +1,621 @@ +/* + * NVIDIA_COPYRIGHT_BEGIN + * + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + * + * NVIDIA_COPYRIGHT_END + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/nvfunctional is an internal header file and must not be used directly. Please use nvfunctional instead.") +#else +#warning "crt/nvfunctional is an internal header file and must not be used directly. Please use nvfunctional instead." +#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__ +#endif + +#ifndef __NV_LIBCXX_FUNCTIONAL_H__ +#define __NV_LIBCXX_FUNCTIONAL_H__ + +#if __cplusplus < 201103L + #if defined(_MSC_VER) + #if _MSC_VER < 1800 + #error This library requires VS 2013 and above + #endif /* _MSC_VER < 1800 */ + #else /* !_MSC_VER */ + #error This library requires support for the ISO C++ 2011 standard + #endif /* _MSC_VER */ +#endif /* __cplusplus */ + +#if defined(_MSC_VER) + #define __NV_ALIGNOF __alignof + #define __NV_NOEXCEPT + #define __NV_CONSTEXPR +#else /* !_MSC_VER */ + #define __NV_ALIGNOF alignof + #define __NV_NOEXCEPT noexcept + #define __NV_CONSTEXPR constexpr +#endif /* _MSC_VER */ + +#include +#include +#include + +// n3290 20.8 +namespace nvstd +{ + +namespace internal { + +// D.8.1 base (deprecated) [depr.base] +template +struct unary_function +{ + typedef _Arg argument_type; + typedef _Result result_type; +}; 
+ +template +struct binary_function +{ + typedef _Arg1 first_argument_type; + typedef _Arg2 second_argument_type; + typedef _Result result_type; +}; + +// move +template +inline __device__ __host__ +typename std::remove_reference<_T>::type&& move(_T&& __t) __NV_NOEXCEPT +{ + return static_cast::type&&>(__t); +} + +// 20.2.2 swap [utility.swap] +// swap +template::value && + std::is_move_assignable<_T>::value>::type> +inline __device__ __host__ +void swap(_T& __a, _T& __b) +#if !defined(_MSC_VER) +noexcept(std::is_nothrow_move_constructible<_T>::value && + std::is_nothrow_move_assignable<_T>::value) +#endif /* !defined(_MSC_VER) */ +{ + _T __t(internal::move(__a)); + __a = internal::move(__b); + __b = internal::move(__t); +} + +// 20.2.3 forward/move helpers [forward] +// forward +template +inline __device__ __host__ +_T&& forward(typename std::remove_reference<_T>::type& __t) __NV_NOEXCEPT +{ + return static_cast<_T&&>(__t); +} + +template +inline __device__ __host__ +_T&& forward(typename std::remove_reference<_T>::type&& __t) __NV_NOEXCEPT +{ + static_assert(!std::is_lvalue_reference<_T>::value, + "Error: __t is instantiated with an lvalue reference type"); + return static_cast<_T&&>(__t); +} + +} // namespace internal + +namespace __functional_helpers +{ + +struct __dummy_class; + +// Store small functors locally: +// a functor is legitimate to local storage if it is one of the following types: +// * member object pointer; +// * member function pointer; +// * closure type of size less than or equal to the largest size of +// the above types; +// * function pointer; +// * any callable class whose size is less than or equal to +// the largest one of the above types; +union _Small_functor_types +{ + void *__obj; + void (*__func_ptr)(); + void (__dummy_class::*mem_fn_ptr)(); +}; + +struct _Small_functor_data { + char __data[sizeof(_Small_functor_types)]; +}; + +template +struct __maybe_base_function +{ }; + +template +struct __maybe_base_function<_RetType(_T1)> + : 
public internal::unary_function<_T1, _RetType> +{ }; + +template +struct __maybe_base_function<_RetType(_T1, _T2)> + : public internal::binary_function<_T1, _T2, _RetType> +{ }; + +} // namespace __functional_helpers + +// 20.8.11 Polymorphic function wrappers [func.wrap] + +// 20.8.11.1 Class bad_function_call [func.wrap.badcall] +// unimplemented because of exception +// class bad_function_call : public std::exception + +// 20.8.11.2 Class template function [func.wrap.func] + +template class function; // undefined + +// Simplified version of template class function, which +// * does not support allocator_arg_t; +// * does not support target and target_type that rely on RTTI +// * does not throw bad_function_call exception on invoking a NULL target +template +class function<_RetType(_ArgTypes...)> + : public __functional_helpers::__maybe_base_function<_RetType(_ArgTypes...)> +{ + __functional_helpers::_Small_functor_data __small_functor_data; + void *__obj; + typedef _RetType(*__meta_fn_type)(void *, _ArgTypes...); + __meta_fn_type __meta_fn; + typedef void(*__cloner_type)(function &, const function &); + __cloner_type __cloner; + typedef void(*__destructor_type)(function *); + __destructor_type __destructor; + + #pragma nv_exec_check_disable + template + __device__ __host__ + __NV_CONSTEXPR bool __use_small_functor_data() const + { + return (sizeof(_F) <= sizeof(__small_functor_data) && + __NV_ALIGNOF(_F) <= __NV_ALIGNOF( + __functional_helpers::_Small_functor_types)); + } + + #pragma nv_exec_check_disable + __device__ __host__ + void* __get_small_functor_data() const + { + return (void*)(&__small_functor_data.__data[0]); + } + + #pragma nv_exec_check_disable + __device__ __host__ + bool __is_small_functor_data() const + { + return __obj == __get_small_functor_data(); + } + + #pragma nv_exec_check_disable + template + __device__ __host__ + static _F& __get_functor(void *__p) + { + return *((_F*)__p); + } + + #pragma nv_exec_check_disable + template + __device__ 
__host__ + static bool __is_empty_functor(const _F& /*__p*/) + { + return false; + } + + #pragma nv_exec_check_disable + template + __device__ __host__ + static bool __is_empty_functor(const _F* __p) + { + return !__p; + } + + #pragma nv_exec_check_disable + template + __device__ __host__ + static bool __is_empty_functor(const _Res _C::* __p) + { + return !__p; + } + + #pragma nv_exec_check_disable + template + __device__ __host__ + static bool __is_empty_functor(const function<_Res(_Args...)>& __p) + { + return !__p; + } + + template + struct __make_cloner + { + #pragma nv_exec_check_disable + __device__ __host__ + static void __clone_data(function &__dest, const function &__src) + { + if (__dest.__use_small_functor_data<_F>()) { + __dest.__obj = __dest.__get_small_functor_data(); + new (__dest.__obj) _F(__src.__get_functor<_F>(__src.__obj)); + } + else { + __dest.__obj = new _F(__src.__get_functor<_F>(__src.__obj)); + } + } + }; + + template + struct __make_destructor + { + #pragma nv_exec_check_disable + __device__ __host__ + static void __destruct(function *__fn) + { + if (__fn->__use_small_functor_data<_F>()) { + (__fn->__get_functor<_F>(__fn->__obj)).~_F(); + } + else { + delete (_F*)(__fn->__obj); + } + } + }; + + // We cannot simple define __make_functor in the following way: + // template + // __make_functor; + // template + // struct __make_functor<_RetType1(_ArgTypes1...), _F> + // + // because VS 2013 cannot unpack _RetType1(_ArgTypes1...) + template + struct __make_functor + { + typedef _RetType1 type; + + #pragma nv_exec_check_disable + __device__ __host__ + static _RetType1 __invoke(void *__d, _ArgTypes1... 
__args) + { + return __get_functor<_F>(__d)( + internal::forward<_ArgTypes1>(__args)...); + } + }; + + template + struct __make_functor<_RetType1, _M _C::*,_ArgTypes1...> + { + typedef _RetType1 type; + typedef _RetType1(*_Fn)(_ArgTypes1...); + + #pragma nv_exec_check_disable + __device__ __host__ + static _RetType1 __invoke(void *__d, _ArgTypes1... __args) + { + return __get_functor<_Fn>(__d)( + internal::forward<_ArgTypes1>(__args)...); + } + }; + +// workaround for GCC version below 4.8 +#if (__GNUC__ == 4) && (__GNUC_MINOR__ < 8) + template + struct __check_callability + : public std::integral_constant::value> + { }; +#elif defined(_MSC_VER) + // simulate VC 2013's behavior... + template + struct __check_callability1 + : public + std::integral_constant::value || + std::is_convertible< + _RetType, + typename std::result_of<_F(_ArgTypes...)>::type + >::value + > + { }; + + template + struct __check_callability + : public std::integral_constant< + bool, + !std::is_same<_F, function>::value && + __check_callability1::type>::value> + { }; +#else /* !((__GNUC__ == 4) && (__GNUC_MINOR__ < 8)) _MSC_VER */ + template ::type> + struct __check_callability + : public std::integral_constant< + bool, + !std::is_same<_F, function>::value && + std::is_convertible< _T, _RetType>::value> + { }; +#endif /* __GNUC__ == 4) && (__GNUC_MINOR__ < 8) */ + + #pragma nv_exec_check_disable + __device__ __host__ + void __destroy() + { + if (__obj) { + __destructor(this); + __obj = 0; + } + } + + #pragma nv_exec_check_disable + __device__ __host__ + void __clear() + { + __obj = 0; + __meta_fn = 0; + __cloner = 0; + __destructor = 0; + } + +public: + typedef _RetType result_type; + +/* + * These typedef(s) are derived from __maybe_base_function + * typedef T1 argument_type; // only if sizeof...(ArgTypes) == 1 and + * // the type in ArgTypes is T1 + * typedef T1 first_argument_type; // only if sizeof...(ArgTypes) == 2 and + * // ArgTypes contains T1 and T2 + * typedef T2 second_argument_type; 
// only if sizeof...(ArgTypes) == 2 and + * // ArgTypes contains T1 and T2 + */ + + // 20.8.11.2.1 construct/copy/destroy [func.wrap.con] + + #pragma nv_exec_check_disable + __device__ __host__ + function() __NV_NOEXCEPT + : __obj(0), __meta_fn(0), __cloner(0), __destructor(0) {} + + #pragma nv_exec_check_disable + __device__ __host__ + function(std::nullptr_t) __NV_NOEXCEPT + : __obj(0), __meta_fn(0), __cloner(0), __destructor(0) {} + + #pragma nv_exec_check_disable + __device__ __host__ + function(const function &__fn) + { + if (__fn.__obj == 0) { + __clear(); + } + else { + __meta_fn = __fn.__meta_fn; + __destructor = __fn.__destructor; + __fn.__cloner(*this, __fn); + __cloner = __fn.__cloner; + } + } + + #pragma nv_exec_check_disable + __device__ __host__ + function(function &&__fn) + { + __fn.swap(*this); + } + + // VS 2013 cannot process __check_callability type trait. + // So, we check callability using static_assert instead of + // using SFINAE such as + // template::value + // >::type> + + #pragma nv_exec_check_disable + template + __device__ __host__ + function(_F); + + // copy and swap + #pragma nv_exec_check_disable + __device__ __host__ + function& operator=(const function& __fn) + { + function(__fn).swap(*this); + return *this; + } + + #pragma nv_exec_check_disable + __device__ __host__ + function& operator=(function&& __fn) + { + function(internal::move(__fn)).swap(*this); + return *this; + } + + #pragma nv_exec_check_disable + __device__ __host__ + function& operator=(std::nullptr_t) + { + __destroy(); + return *this; + } + + #pragma nv_exec_check_disable + template + __device__ __host__ + function& + operator=(_F&& __fn) + { + static_assert(__check_callability<_F>::value, + "Unable to create functor object!"); + function(internal::forward<_F>(__fn)).swap(*this); + return *this; + } + + #pragma nv_exec_check_disable + __device__ __host__ + ~function() + { + __destroy(); + } + + // 20.8.11.2.2 function modifiers [func.wrap.func.mod] + #pragma 
nv_exec_check_disable + __device__ __host__ + void swap(function& __fn) __NV_NOEXCEPT + { + internal::swap(__meta_fn, __fn.__meta_fn); + internal::swap(__cloner, __fn.__cloner); + internal::swap(__destructor, __fn.__destructor); + + if (__is_small_functor_data() && __fn.__is_small_functor_data()) { + internal::swap(__small_functor_data, __fn.__small_functor_data); + } + else if (__is_small_functor_data()) { + internal::swap(__small_functor_data, __fn.__small_functor_data); + internal::swap(__obj, __fn.__obj); + __fn.__obj = __fn.__get_small_functor_data(); + } + else if (__fn.__is_small_functor_data()) { + internal::swap(__small_functor_data, __fn.__small_functor_data); + internal::swap(__obj, __fn.__obj); + __obj = __get_small_functor_data(); + } + else { + internal::swap(__obj, __fn.__obj); + } + } + + // 20.8.11.2.3 function capacity [func.wrap.func.cap] + #pragma nv_exec_check_disable + __device__ __host__ + explicit operator bool() const __NV_NOEXCEPT + { + return __obj; + } + + // 20.8.11.2.4 function invocation [func.wrap.func.inv] + // function::operator() can only be called in device code + // to avoid cross-execution space calls + #pragma nv_exec_check_disable + __device__ __host__ + _RetType operator()(_ArgTypes...) 
const; + +}; + +// Out-of-line definitions +#pragma nv_exec_check_disable +template +template +__device__ __host__ +function<_RetType(_ArgTypes...)>::function(_F __fn) + : __obj(0), __meta_fn(0), __cloner(0), __destructor(0) +{ + static_assert(__check_callability<_F>::value, + "Unable to construct functor object!"); + if (__is_empty_functor(__fn)) + return; + __meta_fn = &__make_functor<_RetType, _F, _ArgTypes...>::__invoke; + __cloner = &__make_cloner<_F>::__clone_data; + __destructor = &__make_destructor<_F>::__destruct; + + if (__use_small_functor_data<_F>()) { + __obj = __get_small_functor_data(); + new ((void*)__obj) _F(internal::move(__fn)); + } + else { + __obj = new _F(internal::move(__fn)); + } +} + +#pragma nv_exec_check_disable +template +__device__ __host__ +_RetType +function<_RetType(_ArgTypes...)>::operator()(_ArgTypes... __args) const +{ + return __meta_fn(__obj, internal::forward<_ArgTypes>(__args)...); +} + +// 20.8.11.2.6, Null pointer comparisons: + +#pragma nv_exec_check_disable +template +__device__ __host__ +bool operator==(const function<_R(_ArgTypes...)>& __fn, std::nullptr_t) +__NV_NOEXCEPT +{ + return !__fn; +} + +#pragma nv_exec_check_disable +template +__device__ __host__ +bool operator==(std::nullptr_t, const function<_R(_ArgTypes...)>& __fn) +__NV_NOEXCEPT +{ + return !__fn; +} + +#pragma nv_exec_check_disable +template +__device__ __host__ +bool operator!=(const function<_R(_ArgTypes...)>& __fn, std::nullptr_t) +__NV_NOEXCEPT +{ + return static_cast(__fn); +} + +#pragma nv_exec_check_disable +template +__device__ __host__ +bool operator!=(std::nullptr_t, const function<_R(_ArgTypes...)>& __fn) +__NV_NOEXCEPT +{ + return static_cast(__fn); +} + +// 20.8.11.2.7, specialized algorithms: +#pragma nv_exec_check_disable +template +__device__ __host__ +void swap(function<_R(_ArgTypes...)>& __fn1, function<_R(_ArgTypes...)>& __fn2) +{ + __fn1.swap(__fn2); +} + +} // namespace nvstd + +#undef __NV_NOEXCEPT +#undef __NV_CONSTEXPR +#undef 
__NV_ALIGNOF + +#endif // __NV_LIBCXX_FUNCTIONAL_H__ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__ +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_90_rt.hpp b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_90_rt.hpp new file mode 100644 index 0000000000000000000000000000000000000000..4e61ac78b996fa03cadf60208bbd58f2e781f3ec --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_90_rt.hpp @@ -0,0 +1,248 @@ +/* + * Copyright 2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#if defined(_MSC_VER) +#pragma message("crt/sm_90_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.") +#else +#warning "crt/sm_90_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead." 
+#endif +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__ +#endif + +#if !defined(__SM_90_RT_HPP__) +#define __SM_90_RT_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_90_RT_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_90_RT_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "device_types.h" +#include "host_defines.h" + +/******************************************************************************* +* * +* Below are implementations of SM-9.0 builtin functions which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ +extern "C" { + __device__ unsigned __nv_isClusterShared_impl(const void *); + __device__ void * __nv_cluster_map_shared_rank_impl(const void *, unsigned); + __device__ unsigned __nv_cluster_query_shared_rank_impl(const void *); + __device__ unsigned __nv_clusterDimIsSpecifed_impl(); + __device__ void __nv_clusterDim_impl(unsigned *, unsigned *, unsigned *); + __device__ void __nv_clusterRelativeBlockIdx_impl(unsigned *, + unsigned *, unsigned *); + __device__ void __nv_clusterGridDimInClusters_impl(unsigned *, + unsigned *, unsigned *); + __device__ void __nv_clusterIdx_impl(unsigned *, unsigned *, unsigned *); + __device__ unsigned __nv_clusterRelativeBlockRank_impl(); + __device__ unsigned __nv_clusterSizeInBlocks_impl(); + __device__ void __nv_cluster_barrier_arrive_impl(); + __device__ void __nv_cluster_barrier_arrive_relaxed_impl(); + __device__ void __nv_cluster_barrier_wait_impl(); + __device__ void 
__nv_threadfence_cluster_impl(); + + __device__ __device_builtin__ float2 __f2AtomicAdd(float2 *, float2); + __device__ __device_builtin__ float2 __f2AtomicAdd_block(float2 *, float2); + __device__ __device_builtin__ float2 __f2AtomicAdd_system(float2 *, float2); + __device__ __device_builtin__ float4 __f4AtomicAdd(float4 *, float4); + __device__ __device_builtin__ float4 __f4AtomicAdd_block(float4 *, float4); + __device__ __device_builtin__ float4 __f4AtomicAdd_system(float4 *, float4); +} // extern "C" + +__SM_90_RT_DECL__ unsigned __isCtaShared(const void *ptr) +{ + return __isShared(ptr); +} + +__SM_90_RT_DECL__ unsigned __isClusterShared(const void *ptr) +{ + return __nv_isClusterShared_impl(ptr); +} + +__SM_90_RT_DECL__ void *__cluster_map_shared_rank(const void *ptr, + unsigned target_block_rank) +{ + return __nv_cluster_map_shared_rank_impl(ptr, target_block_rank); +} + +__SM_90_RT_DECL__ unsigned __cluster_query_shared_rank(const void *ptr) +{ + return __nv_cluster_query_shared_rank_impl(ptr); +} + +__SM_90_RT_DECL__ uint2 __cluster_map_shared_multicast(const void *ptr, + unsigned int cluster_cta_mask) +{ + return make_uint2((unsigned)__cvta_generic_to_shared(ptr), cluster_cta_mask); +} + +__SM_90_RT_DECL__ unsigned __clusterDimIsSpecified() +{ + return __nv_clusterDimIsSpecifed_impl(); +} + +__SM_90_RT_DECL__ dim3 __clusterDim() +{ + unsigned x, y, z; + __nv_clusterDim_impl(&x, &y, &z); + return dim3(x,y,z); +} + +__SM_90_RT_DECL__ dim3 __clusterRelativeBlockIdx() +{ + unsigned x, y, z; + __nv_clusterRelativeBlockIdx_impl(&x, &y, &z); + return dim3(x,y,z); +} + +__SM_90_RT_DECL__ dim3 __clusterGridDimInClusters() +{ + unsigned x, y, z; + __nv_clusterGridDimInClusters_impl(&x, &y, &z); + return dim3(x,y,z); +} + +__SM_90_RT_DECL__ dim3 __clusterIdx() +{ + unsigned x, y, z; + __nv_clusterIdx_impl(&x, &y, &z); + return dim3(x,y,z); +} + +__SM_90_RT_DECL__ unsigned __clusterRelativeBlockRank() +{ + return __nv_clusterRelativeBlockRank_impl(); +} + 
+__SM_90_RT_DECL__ unsigned __clusterSizeInBlocks() +{ + return __nv_clusterSizeInBlocks_impl(); +} + +__SM_90_RT_DECL__ void __cluster_barrier_arrive() +{ + __nv_cluster_barrier_arrive_impl(); +} + +__SM_90_RT_DECL__ void __cluster_barrier_arrive_relaxed() +{ + __nv_cluster_barrier_arrive_relaxed_impl(); +} + +__SM_90_RT_DECL__ void __cluster_barrier_wait() +{ + __nv_cluster_barrier_wait_impl(); +} + +__SM_90_RT_DECL__ void __threadfence_cluster() +{ + __nv_threadfence_cluster_impl(); +} + + +/* Define __PTR for atomicAdd prototypes below, undef after done */ +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __PTR "l" +#else +#define __PTR "r" +#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/ + +__SM_90_RT_DECL__ float2 atomicAdd(float2 *address, float2 val) { + return __f2AtomicAdd(address, val); +} + +__SM_90_RT_DECL__ float2 atomicAdd_block(float2 *address, float2 val) { + return __f2AtomicAdd_block(address, val); +} + +__SM_90_RT_DECL__ float2 atomicAdd_system(float2 *address, float2 val) { + return __f2AtomicAdd_system(address, val); +} + +__SM_90_RT_DECL__ float4 atomicAdd(float4 *address, float4 val) { + return __f4AtomicAdd(address, val); +} + +__SM_90_RT_DECL__ float4 atomicAdd_block(float4 *address, float4 val) { + return __f4AtomicAdd_block(address, val); +} + +__SM_90_RT_DECL__ float4 atomicAdd_system(float4 *address, float4 val) { + return __f4AtomicAdd_system(address, val); +} + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 900 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_90_RT_DECL__ + +#endif /* !__SM_90_RT_HPP__ */ + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__ +#endif diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExt.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExt.h new file mode 100644 index 0000000000000000000000000000000000000000..9fab7d874340fad50d63d80b6b68f678ec243191 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExt.h @@ -0,0 +1,1499 @@ +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. 
+* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. +*/ + +/** \file nvToolsExt.h + */ + +/* ========================================================================= */ +/** \mainpage + * \tableofcontents + * \section INTRODUCTION Introduction + * + * The NVIDIA Tools Extension library is a set of functions that a + * developer can use to provide additional information to tools. + * The additional information is used by the tool to improve + * analysis and visualization of data. + * + * The library introduces close to zero overhead if no tool is + * attached to the application. The overhead when a tool is + * attached is specific to the tool. + * + * \section INITIALIZATION_SECTION Initialization + * + * Typically the tool's library that plugs into NVTX is indirectly + * loaded via enviromental properties that are platform specific. + * For some platform or special cases, the user may be required + * to instead explicity initialize instead though. This can also + * be helpful to control when the API loads a tool's library instead + * of what would typically be the first function call to emit info. + * For these rare case, see \ref INITIALIZATION for additional information. + * + * \section MARKERS_AND_RANGES Markers and Ranges + * + * Markers and ranges are used to describe events at a specific time (markers) + * or over a time span (ranges) during the execution of the application + * respectively. + * + * \subsection MARKERS Markers + * + * Markers denote specific moments in time. + * + * + * See \ref DOMAINS and \ref EVENT_ATTRIBUTES for additional information on + * how to specify the domain. 
+ * + * \subsection THREAD_RANGES Thread Ranges + * + * Thread ranges denote nested time ranges. Nesting is maintained per thread + * per domain and does not require any additional correlation mechanism. The + * duration of a thread range is defined by the corresponding pair of + * nvtxRangePush* to nvtxRangePop API calls. + * + * See \ref DOMAINS and \ref EVENT_ATTRIBUTES for additional information on + * how to specify the domain. + * + * \subsection PROCESS_RANGES Process Ranges + * + * Process ranges denote a time span that can expose arbitrary concurrency, as + * opposed to thread ranges that only support nesting. In addition the range + * start event can happen on a different thread than the end marker. For the + * correlation of a start/end pair an unique correlation ID is used that is + * returned from the start API call and needs to be passed into the end API + * call. + * + * \subsection EVENT_ATTRIBUTES Event Attributes + * + * \ref MARKERS_AND_RANGES can be annotated with various attributes to provide + * additional information for an event or to guide the tool's visualization of + * the data. Each of the attributes is optional and if left unused the + * attributes fall back to a default value. The attributes include: + * - color + * - category + * + * To specify any attribute other than the text message, the \ref + * EVENT_ATTRIBUTE_STRUCTURE "Event Attribute Structure" must be used. + * + * \section DOMAINS Domains + * + * Domains enable developers to scope annotations. By default all events and + * annotations are in the default domain. Additional domains can be registered. + * This allows developers to scope markers, ranges, and resources names to + * avoid conflicts. + * + * The function ::nvtxDomainCreateA or ::nvtxDomainCreateW is used to create + * a named domain. + * + * Each domain maintains its own + * - categories + * - thread range stacks + * - registered strings + * + * The function ::nvtxDomainDestroy marks the end of the domain. 
Destroying + * a domain unregisters and destroys all objects associated with it such as + * registered strings, resource objects, named categories, and started ranges. + * + * \section RESOURCE_NAMING Resource Naming + * + * This section covers calls that allow to annotate objects with user-provided + * names in order to allow for a better analysis of complex trace data. All of + * the functions take the handle or the ID of the object to name and the name. + * The functions can be called multiple times during the execution of an + * application, however, in that case it is implementation dependent which + * name will be reported by the tool. + * + * \subsection CATEGORY_NAMING Category Naming + * + * Some function in this library support associating an integer category + * to enable filtering and sorting. The category naming functions allow + * the application to associate a user friendly name with the integer + * category. Support for domains have been added in NVTX_VERSION_2 to + * avoid collisions when domains are developed independantly. + * + * \subsection RESOURCE_OBJECTS Resource Objects + * + * Resource objects are a generic mechanism for attaching data to an application + * resource. The identifier field makes the association to a pointer or handle, + * while the type field helps provide deeper understanding of the identifier as + * well as enabling differentiation in cases where handles generated by different + * APIs may collide. The resource object may also have an associated message to + * associate with the application resource, enabling further annotation of this + * object and how it is used. + * + * The resource object was introduced in NVTX_VERSION_2 to supersede existing naming + * functions and allow the application resource identified by those functions to be + * associated to a domain. The other naming functions are still supported for backward + * compatibility but will be associated only to the default domain. 
+ * + * \subsection RESOURCE_NAMING_OS Resource Naming + * + * Some operating system resources creation APIs do not support providing a user friendly + * name, such as some OS thread creation APIs. This API support resource naming though + * both through resource objects and functions following the pattern + * nvtxName[RESOURCE_TYPE][A|W](identifier, name). Resource objects introduced in NVTX_VERSION 2 + * supersede the other functions with a a more general method of assigning names to OS resources, + * along with associating them to domains too. The older nvtxName* functions are only associated + * with the default domain. + * \section EXTENSIONS Optional Extensions + * Optional extensions will either appear within the existing sections the extend or appear + * in the "Related Pages" when they introduce new concepts. + */ + + /** + * Tools Extension API version + */ +#if defined(NVTX_VERSION) && NVTX_VERSION < 3 +#error "Trying to #include NVTX version 3 in a source file where an older NVTX version has already been included. If you are not directly using NVTX (the NVIDIA Tools Extension library), you are getting this error because libraries you are using have included different versions of NVTX. Suggested solutions are: (1) reorder #includes so the newest NVTX version is included first, (2) avoid using the conflicting libraries in the same .c/.cpp file, or (3) update the library using the older NVTX version to use the newer version instead." +#endif + +/* Header guard */ +#if !defined(NVTX_VERSION) +#define NVTX_VERSION 3 + +#if defined(_MSC_VER) +#define NVTX_API __stdcall +#define NVTX_INLINE_STATIC __inline static +#else /*defined(__GNUC__)*/ +#define NVTX_API +#define NVTX_INLINE_STATIC inline static +#endif /* Platform */ + +#if defined(NVTX_NO_IMPL) +/* When omitting implementation, avoid declaring functions inline */ +/* without definitions, since this causes compiler warnings. 
*/ +#define NVTX_DECLSPEC +#elif defined(NVTX_EXPORT_API) +/* Allow overriding definition of NVTX_DECLSPEC when exporting API. */ +/* Default is empty, meaning non-inline with external linkage. */ +#if !defined(NVTX_DECLSPEC) +#define NVTX_DECLSPEC +#endif +#else +/* Normal NVTX usage defines the NVTX API inline with static */ +/* (internal) linkage. */ +#define NVTX_DECLSPEC NVTX_INLINE_STATIC +#endif + +#include "nvtxDetail/nvtxLinkOnce.h" + +#define NVTX_VERSIONED_IDENTIFIER_L3(NAME, VERSION) NAME##_v##VERSION +#define NVTX_VERSIONED_IDENTIFIER_L2(NAME, VERSION) NVTX_VERSIONED_IDENTIFIER_L3(NAME, VERSION) +#define NVTX_VERSIONED_IDENTIFIER(NAME) NVTX_VERSIONED_IDENTIFIER_L2(NAME, NVTX_VERSION) + +/** + * The nvToolsExt library depends on stdint.h. If the build tool chain in use + * does not include stdint.h then define NVTX_STDINT_TYPES_ALREADY_DEFINED + * and define the following types: + *
    + *
  • uint8_t + *
  • int8_t + *
  • uint16_t + *
  • int16_t + *
  • uint32_t + *
  • int32_t + *
  • uint64_t + *
  • int64_t + *
  • uintptr_t + *
  • intptr_t + *
+ * #define NVTX_STDINT_TYPES_ALREADY_DEFINED if you are using your own header file. + */ +#ifndef NVTX_STDINT_TYPES_ALREADY_DEFINED +#include +#endif + +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** +* Result Codes +*/ + +#define NVTX_SUCCESS 0 +#define NVTX_FAIL 1 +#define NVTX_ERR_INIT_LOAD_PROPERTY 2 +#define NVTX_ERR_INIT_ACCESS_LIBRARY 3 +#define NVTX_ERR_INIT_LOAD_LIBRARY 4 +#define NVTX_ERR_INIT_MISSING_LIBRARY_ENTRY_POINT 5 +#define NVTX_ERR_INIT_FAILED_LIBRARY_ENTRY_POINT 6 +#define NVTX_ERR_NO_INJECTION_LIBRARY_AVAILABLE 7 + +/** + * Size of the nvtxEventAttributes_t structure. + */ +#define NVTX_EVENT_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxEventAttributes_t) ) ) + +#define NVTX_NO_PUSH_POP_TRACKING ((int)-2) + +typedef uint64_t nvtxRangeId_t; + +/* Forward declaration of opaque domain registration structure */ +struct nvtxDomainRegistration_st; +typedef struct nvtxDomainRegistration_st nvtxDomainRegistration; + +/* \brief Domain Handle Structure. +* \anchor DOMAIN_HANDLE_STRUCTURE +* +* This structure is opaque to the user and is used as a handle to reference +* a domain. This type is returned from tools when using the NVTX API to +* create a domain. +* +*/ +typedef nvtxDomainRegistration* nvtxDomainHandle_t; + +/* Forward declaration of opaque string registration structure */ +struct nvtxStringRegistration_st; +typedef struct nvtxStringRegistration_st nvtxStringRegistration; + +/* \brief Registered String Handle Structure. +* \anchor REGISTERED_STRING_HANDLE_STRUCTURE +* +* This structure is opaque to the user and is used as a handle to reference +* a registered string. This type is returned from tools when using the NVTX +* API to create a registered string. 
+* +*/ +typedef nvtxStringRegistration* nvtxStringHandle_t; + +/* ========================================================================= */ +/** \defgroup GENERAL General + * @{ + */ + +/** --------------------------------------------------------------------------- + * Color Types + * ------------------------------------------------------------------------- */ +typedef enum nvtxColorType_t +{ + NVTX_COLOR_UNKNOWN = 0, /**< Color attribute is unused. */ + NVTX_COLOR_ARGB = 1 /**< An ARGB color is provided. */ +} nvtxColorType_t; + +/** --------------------------------------------------------------------------- + * Message Types + * ------------------------------------------------------------------------- */ +typedef enum nvtxMessageType_t +{ + NVTX_MESSAGE_UNKNOWN = 0, /**< Message payload is unused. */ + NVTX_MESSAGE_TYPE_ASCII = 1, /**< A character sequence is used as payload. */ + NVTX_MESSAGE_TYPE_UNICODE = 2, /**< A wide character sequence is used as payload. */ + /* NVTX_VERSION_2 */ + NVTX_MESSAGE_TYPE_REGISTERED = 3, /**< A unique string handle that was registered + with \ref nvtxDomainRegisterStringA() or + \ref nvtxDomainRegisterStringW(). */ +} nvtxMessageType_t; + +typedef union nvtxMessageValue_t +{ + const char* ascii; + const wchar_t* unicode; + /* NVTX_VERSION_2 */ + nvtxStringHandle_t registered; +} nvtxMessageValue_t; + + +/** @} */ /*END defgroup*/ +/* ------------------------------------------------------------------------- */ +/** \brief Force initialization (optional) +* +* Force NVTX library to initialize. The first call to any NVTX API function +* will automatically initialize the entire API. This can make the first call +* much slower than subsequent calls. In applications where the first call to +* NVTX may be in a performance-critical section, calling nvtxInitialize before +* any performance-critical sections will ensure NVTX initialization occurs at +* an acceptable time. 
Since nvtxInitialize takes no parameters and has no +* expected behavior besides initialization, it is convenient to add a call to +* nvtxInitialize in NVTX-instrumented applications that need to force earlier +* initialization without changing any other code. For example, if an app's +* first NVTX call is nvtxDomainCreate, and it is difficult to move that call +* earlier because the domain handle must be stored in an object only created +* at that point, adding a call to nvtxInitialize at the top of main() will +* ensure the later call to nvtxDomainCreate is as fast as possible. +* +* \version \NVTX_VERSION_3 +* +* \param reserved - must be zero or NULL. +* +* @{ */ +NVTX_DECLSPEC void NVTX_API nvtxInitialize(const void* reserved); +/** @} */ + + +/** @} */ /*END defgroup*/ + +/* ========================================================================= */ +/** \defgroup EVENT_ATTRIBUTES Event Attributes +* @{ +*/ + +/** --------------------------------------------------------------------------- +* Payload Types +* ------------------------------------------------------------------------- */ +typedef enum nvtxPayloadType_t +{ + NVTX_PAYLOAD_UNKNOWN = 0, /**< Color payload is unused. */ + NVTX_PAYLOAD_TYPE_UNSIGNED_INT64 = 1, /**< A 64 bit unsigned integer value is used as payload. */ + NVTX_PAYLOAD_TYPE_INT64 = 2, /**< A 64 bit signed integer value is used as payload. */ + NVTX_PAYLOAD_TYPE_DOUBLE = 3, /**< A 64 bit floating point value is used as payload. */ + /* NVTX_VERSION_2 */ + NVTX_PAYLOAD_TYPE_UNSIGNED_INT32 = 4, /**< A 32 bit floating point value is used as payload. */ + NVTX_PAYLOAD_TYPE_INT32 = 5, /**< A 32 bit floating point value is used as payload. */ + NVTX_PAYLOAD_TYPE_FLOAT = 6 /**< A 32 bit floating point value is used as payload. */ +} nvtxPayloadType_t; + +/** \brief Event Attribute Structure. + * \anchor EVENT_ATTRIBUTE_STRUCTURE + * + * This structure is used to describe the attributes of an event. 
The layout of + * the structure is defined by a specific version of the tools extension + * library and can change between different versions of the Tools Extension + * library. + * + * \par Initializing the Attributes + * + * The caller should always perform the following three tasks when using + * attributes: + *
    + *
  • Zero the structure + *
  • Set the version field + *
  • Set the size field + *
+ * + * Zeroing the structure sets all the event attributes types and values + * to the default value. + * + * The version and size field are used by the Tools Extension + * implementation to handle multiple versions of the attributes structure. + * + * It is recommended that the caller use one of the following to methods + * to initialize the event attributes structure: + * + * \par Method 1: Initializing nvtxEventAttributes for future compatibility + * \code + * nvtxEventAttributes_t eventAttrib = {0}; + * eventAttrib.version = NVTX_VERSION; + * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; + * \endcode + * + * \par Method 2: Initializing nvtxEventAttributes for a specific version + * \code + * nvtxEventAttributes_t eventAttrib = {0}; + * eventAttrib.version = 1; + * eventAttrib.size = (uint16_t)(sizeof(nvtxEventAttributes_v1)); + * \endcode + * + * If the caller uses Method 1 it is critical that the entire binary + * layout of the structure be configured to 0 so that all fields + * are initialized to the default value. + * + * The caller should either use both NVTX_VERSION and + * NVTX_EVENT_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values + * and a versioned type (Method 2). Using a mix of the two methods + * will likely cause either source level incompatibility or binary + * incompatibility in the future. 
+ * + * \par Settings Attribute Types and Values + * + * + * \par Example: + * \code + * // Initialize + * nvtxEventAttributes_t eventAttrib = {0}; + * eventAttrib.version = NVTX_VERSION; + * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; + * + * // Configure the Attributes + * eventAttrib.colorType = NVTX_COLOR_ARGB; + * eventAttrib.color = 0xFF880000; + * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; + * eventAttrib.message.ascii = "Example"; + * \endcode + * + * In the example the caller does not have to set the value of + * \ref ::nvtxEventAttributes_v2::category or + * \ref ::nvtxEventAttributes_v2::payload as these fields were set to + * the default value by {0}. + * \sa + * ::nvtxDomainMarkEx + * ::nvtxDomainRangeStartEx + * ::nvtxDomainRangePushEx + */ +typedef struct nvtxEventAttributes_v2 +{ + /** + * \brief Version flag of the structure. + * + * Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs + * supported in this header file. This can optionally be overridden to + * another version of the tools extension library. + */ + uint16_t version; + + /** + * \brief Size of the structure. + * + * Needs to be set to the size in bytes of the event attribute + * structure used to specify the event. + */ + uint16_t size; + + /** + * \brief ID of the category the event is assigned to. + * + * A category is a user-controlled ID that can be used to group + * events. The tool may use category IDs to improve filtering or + * enable grouping of events in the same category. The functions + * \ref ::nvtxNameCategoryA or \ref ::nvtxNameCategoryW can be used + * to name a category. + * + * Default Value is 0 + */ + uint32_t category; + + /** \brief Color type specified in this attribute structure. + * + * Defines the color format of the attribute structure's \ref COLOR_FIELD + * "color" field. + * + * Default Value is NVTX_COLOR_UNKNOWN + */ + int32_t colorType; /* nvtxColorType_t */ + + /** \brief Color assigned to this event. 
\anchor COLOR_FIELD + * + * The color that the tool should use to visualize the event. + */ + uint32_t color; + + /** + * \brief Payload type specified in this attribute structure. + * + * Defines the payload format of the attribute structure's \ref PAYLOAD_FIELD + * "payload" field. + * + * Default Value is NVTX_PAYLOAD_UNKNOWN + */ + int32_t payloadType; /* nvtxPayloadType_t */ + + int32_t reserved0; + + /** + * \brief Payload assigned to this event. \anchor PAYLOAD_FIELD + * + * A numerical value that can be used to annotate an event. The tool could + * use the payload data to reconstruct graphs and diagrams. + */ + union payload_t + { + uint64_t ullValue; + int64_t llValue; + double dValue; + /* NVTX_VERSION_2 */ + uint32_t uiValue; + int32_t iValue; + float fValue; + } payload; + + /** \brief Message type specified in this attribute structure. + * + * Defines the message format of the attribute structure's \ref MESSAGE_FIELD + * "message" field. + * + * Default Value is NVTX_MESSAGE_UNKNOWN + */ + int32_t messageType; /* nvtxMessageType_t */ + + /** \brief Message assigned to this attribute structure. \anchor MESSAGE_FIELD + * + * The text message that is attached to an event. + */ + nvtxMessageValue_t message; + +} nvtxEventAttributes_v2; + +typedef struct nvtxEventAttributes_v2 nvtxEventAttributes_t; + +/** @} */ /*END defgroup*/ +/* ========================================================================= */ +/** \defgroup MARKERS_AND_RANGES Markers and Ranges + * + * See \ref MARKERS_AND_RANGES for more details + * + * @{ + */ + +/** \name Marker */ + +/* ------------------------------------------------------------------------- */ +/** \brief Marks an instantaneous event in the application. +* +* A marker can contain a text message or specify additional information +* using the event attributes structure. These attributes include a text +* message, color, category, and a payload. 
Each of the attributes is optional +* and can only be sent out using the \ref nvtxDomainMarkEx function. +* +* nvtxDomainMarkEx(NULL, event) is equivalent to calling +* nvtxMarkEx(event). +* +* \param domain - The domain of scoping the category. +* \param eventAttrib - The event attribute structure defining the marker's +* attribute types and attribute values. +* +* \sa +* ::nvtxMarkEx +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC void NVTX_API nvtxDomainMarkEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Marks an instantaneous event in the application. + * + * A marker can contain a text message or specify additional information + * using the event attributes structure. These attributes include a text + * message, color, category, and a payload. Each of the attributes is optional + * and can only be sent out using the \ref nvtxMarkEx function. + * If \ref nvtxMarkA or \ref nvtxMarkW are used to specify the marker + * or if an attribute is unspecified then a default value will be used. + * + * \param eventAttrib - The event attribute structure defining the marker's + * attribute types and attribute values. + * + * \par Example: + * \code + * // zero the structure + * nvtxEventAttributes_t eventAttrib = {0}; + * // set the version and the size information + * eventAttrib.version = NVTX_VERSION; + * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; + * // configure the attributes. 0 is the default for all attributes. 
+ * eventAttrib.colorType = NVTX_COLOR_ARGB; + * eventAttrib.color = 0xFF880000; + * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; + * eventAttrib.message.ascii = "Example nvtxMarkEx"; + * nvtxMarkEx(&eventAttrib); + * \endcode + * + * \sa + * ::nvtxDomainMarkEx + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxMarkEx(const nvtxEventAttributes_t* eventAttrib); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Marks an instantaneous event in the application. + * + * A marker created using \ref nvtxMarkA or \ref nvtxMarkW contains only a + * text message. + * + * \param message - The message associated to this marker event. + * + * \par Example: + * \code + * nvtxMarkA("Example nvtxMarkA"); + * nvtxMarkW(L"Example nvtxMarkW"); + * \endcode + * + * \sa + * ::nvtxDomainMarkEx + * ::nvtxMarkEx + * + * \version \NVTX_VERSION_0 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxMarkA(const char* message); +NVTX_DECLSPEC void NVTX_API nvtxMarkW(const wchar_t* message); +/** @} */ + + +/** \name Process Ranges */ + +/* ------------------------------------------------------------------------- */ +/** \brief Starts a process range in a domain. +* +* \param domain - The domain of scoping the category. +* \param eventAttrib - The event attribute structure defining the range's +* attribute types and attribute values. +* +* \return The unique ID used to correlate a pair of Start and End events. +* +* \remarks Ranges defined by Start/End can overlap. +* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("my domain"); +* nvtxEventAttributes_t eventAttrib = {0}; +* eventAttrib.version = NVTX_VERSION; +* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; +* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; +* eventAttrib.message.ascii = "my range"; +* nvtxRangeId_t rangeId = nvtxDomainRangeStartEx(&eventAttrib); +* // ... 
+* nvtxDomainRangeEnd(rangeId); +* \endcode +* +* \sa +* ::nvtxDomainRangeEnd +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxDomainRangeStartEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Starts a process range. + * + * \param eventAttrib - The event attribute structure defining the range's + * attribute types and attribute values. + * + * \return The unique ID used to correlate a pair of Start and End events. + * + * \remarks Ranges defined by Start/End can overlap. + * + * \par Example: + * \code + * nvtxEventAttributes_t eventAttrib = {0}; + * eventAttrib.version = NVTX_VERSION; + * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; + * eventAttrib.category = 3; + * eventAttrib.colorType = NVTX_COLOR_ARGB; + * eventAttrib.color = 0xFF0088FF; + * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; + * eventAttrib.message.ascii = "Example Range"; + * nvtxRangeId_t rangeId = nvtxRangeStartEx(&eventAttrib); + * // ... + * nvtxRangeEnd(rangeId); + * \endcode + * + * \sa + * ::nvtxRangeEnd + * ::nvtxDomainRangeStartEx + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartEx(const nvtxEventAttributes_t* eventAttrib); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Starts a process range. + * + * \param message - The event message associated to this range event. + * + * \return The unique ID used to correlate a pair of Start and End events. + * + * \remarks Ranges defined by Start/End can overlap. 
+ * + * \par Example: + * \code + * nvtxRangeId_t r1 = nvtxRangeStartA("Range 1"); + * nvtxRangeId_t r2 = nvtxRangeStartW(L"Range 2"); + * nvtxRangeEnd(r1); + * nvtxRangeEnd(r2); + * \endcode + * + * \sa + * ::nvtxRangeEnd + * ::nvtxRangeStartEx + * ::nvtxDomainRangeStartEx + * + * \version \NVTX_VERSION_0 + * @{ */ +NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartA(const char* message); +NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartW(const wchar_t* message); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Ends a process range. +* +* \param domain - The domain +* \param id - The correlation ID returned from a nvtxRangeStart call. +* +* \remarks This function is offered for completeness but is an alias for ::nvtxRangeEnd. +* It does not need a domain param since that is associated with the range ID at ::nvtxDomainRangeStartEx +* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("my domain"); +* nvtxEventAttributes_t eventAttrib = {0}; +* eventAttrib.version = NVTX_VERSION; +* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; +* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; +* eventAttrib.message.ascii = "my range"; +* nvtxRangeId_t rangeId = nvtxDomainRangeStartEx(&eventAttrib); +* // ... +* nvtxDomainRangeEnd(rangeId); +* \endcode +* +* \sa +* ::nvtxDomainRangeStartEx +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC void NVTX_API nvtxDomainRangeEnd(nvtxDomainHandle_t domain, nvtxRangeId_t id); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Ends a process range. + * + * \param id - The correlation ID returned from an nvtxRangeStart call. 
+ * + * \sa + * ::nvtxDomainRangeStartEx + * ::nvtxRangeStartEx + * ::nvtxRangeStartA + * ::nvtxRangeStartW + * + * \version \NVTX_VERSION_0 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxRangeEnd(nvtxRangeId_t id); +/** @} */ + +/** \name Thread Ranges */ + +/* ------------------------------------------------------------------------- */ +/** \brief Starts a nested thread range. +* +* \param domain - The domain of scoping. +* \param eventAttrib - The event attribute structure defining the range's +* attribute types and attribute values. +* +* \return The 0 based level of range being started. This value is scoped to the domain. +* If an error occurs, a negative value is returned. +* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain"); +* nvtxEventAttributes_t eventAttrib = {0}; +* eventAttrib.version = NVTX_VERSION; +* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; +* eventAttrib.colorType = NVTX_COLOR_ARGB; +* eventAttrib.color = 0xFFFF0000; +* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; +* eventAttrib.message.ascii = "Level 0"; +* nvtxDomainRangePushEx(domain, &eventAttrib); +* +* // Re-use eventAttrib +* eventAttrib.messageType = NVTX_MESSAGE_TYPE_UNICODE; +* eventAttrib.message.unicode = L"Level 1"; +* nvtxDomainRangePushEx(domain, &eventAttrib); +* +* nvtxDomainRangePop(domain); //level 1 +* nvtxDomainRangePop(domain); //level 0 +* \endcode +* +* \sa +* ::nvtxDomainRangePop +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC int NVTX_API nvtxDomainRangePushEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Starts a nested thread range. + * + * \param eventAttrib - The event attribute structure defining the range's + * attribute types and attribute values. + * + * \return The 0 based level of range being started. This level is per domain. 
+ * If an error occurs a negative value is returned. + * + * \par Example: + * \code + * nvtxEventAttributes_t eventAttrib = {0}; + * eventAttrib.version = NVTX_VERSION; + * eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; + * eventAttrib.colorType = NVTX_COLOR_ARGB; + * eventAttrib.color = 0xFFFF0000; + * eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; + * eventAttrib.message.ascii = "Level 0"; + * nvtxRangePushEx(&eventAttrib); + * + * // Re-use eventAttrib + * eventAttrib.messageType = NVTX_MESSAGE_TYPE_UNICODE; + * eventAttrib.message.unicode = L"Level 1"; + * nvtxRangePushEx(&eventAttrib); + * + * nvtxRangePop(); + * nvtxRangePop(); + * \endcode + * + * \sa + * ::nvtxDomainRangePushEx + * ::nvtxRangePop + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC int NVTX_API nvtxRangePushEx(const nvtxEventAttributes_t* eventAttrib); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Starts a nested thread range. + * + * \param message - The event message associated to this range event. + * + * \return The 0 based level of range being started. If an error occurs a + * negative value is returned. + * + * \par Example: + * \code + * nvtxRangePushA("Level 0"); + * nvtxRangePushW(L"Level 1"); + * nvtxRangePop(); + * nvtxRangePop(); + * \endcode + * + * \sa + * ::nvtxDomainRangePushEx + * ::nvtxRangePop + * + * \version \NVTX_VERSION_0 + * @{ */ +NVTX_DECLSPEC int NVTX_API nvtxRangePushA(const char* message); +NVTX_DECLSPEC int NVTX_API nvtxRangePushW(const wchar_t* message); +/** @} */ + + +/* ------------------------------------------------------------------------- */ +/** \brief Ends a nested thread range. +* +* \return The level of the range being ended. If an error occurs a negative +* value is returned on the current thread. 
+* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreate("example library"); +* nvtxDomainRangePushA(domain, "Level 0"); +* nvtxDomainRangePushW(domain, L"Level 1"); +* nvtxDomainRangePop(domain); +* nvtxDomainRangePop(domain); +* \endcode +* +* \sa +* ::nvtxRangePushEx +* ::nvtxRangePushA +* ::nvtxRangePushW +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC int NVTX_API nvtxDomainRangePop(nvtxDomainHandle_t domain); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Ends a nested thread range. + * + * \return The level of the range being ended. If an error occurs a negative + * value is returned on the current thread. + * + * \par Example: + * \code + * nvtxRangePushA("Level 0"); + * nvtxRangePushW(L"Level 1"); + * nvtxRangePop(); + * nvtxRangePop(); + * \endcode + * + * \sa + * ::nvtxRangePushEx + * ::nvtxRangePushA + * ::nvtxRangePushW + * + * \version \NVTX_VERSION_0 + * @{ */ +NVTX_DECLSPEC int NVTX_API nvtxRangePop(void); +/** @} */ + + +/** @} */ /*END defgroup*/ +/* ========================================================================= */ +/** \defgroup RESOURCE_NAMING Resource Naming + * + * See \ref RESOURCE_NAMING for more details + * + * @{ + */ + + +/* ------------------------------------------------------------------------- */ +/** \name Functions for Generic Resource Naming*/ +/* ------------------------------------------------------------------------- */ + +/* ------------------------------------------------------------------------- */ +/** \cond SHOW_HIDDEN +* \brief Resource typing helpers. 
+* +* Classes are used to make it easy to create a series of resource types +* per API without collisions +*/ +#define NVTX_RESOURCE_MAKE_TYPE(CLASS, INDEX) ((((uint32_t)(NVTX_RESOURCE_CLASS_ ## CLASS))<<16)|((uint32_t)(INDEX))) +#define NVTX_RESOURCE_CLASS_GENERIC 1 +/** \endcond */ + +/* ------------------------------------------------------------------------- */ +/** \brief Generic resource type for when a resource class is not available. +* +* \sa +* ::nvtxDomainResourceCreate +* +* \version \NVTX_VERSION_2 +*/ +typedef enum nvtxResourceGenericType_t +{ + NVTX_RESOURCE_TYPE_UNKNOWN = 0, + NVTX_RESOURCE_TYPE_GENERIC_POINTER = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 1), /**< Generic pointer assumed to have no collisions with other pointers. */ + NVTX_RESOURCE_TYPE_GENERIC_HANDLE = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 2), /**< Generic handle assumed to have no collisions with other handles. */ + NVTX_RESOURCE_TYPE_GENERIC_THREAD_NATIVE = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 3), /**< OS native thread identifier. */ + NVTX_RESOURCE_TYPE_GENERIC_THREAD_POSIX = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 4) /**< POSIX pthread identifier. */ +} nvtxResourceGenericType_t; + + + +/** \brief Resource Attribute Structure. +* \anchor RESOURCE_ATTRIBUTE_STRUCTURE +* +* This structure is used to describe the attributes of a resource. The layout of +* the structure is defined by a specific version of the tools extension +* library and can change between different versions of the Tools Extension +* library. +* +* \par Initializing the Attributes +* +* The caller should always perform the following three tasks when using +* attributes: +*
    +*
  • Zero the structure +*
  • Set the version field +*
  • Set the size field +*
+* +* Zeroing the structure sets all the resource attributes types and values +* to the default value. +* +* The version and size field are used by the Tools Extension +* implementation to handle multiple versions of the attributes structure. +* +* It is recommended that the caller use one of the following to methods +* to initialize the event attributes structure: +* +* \par Method 1: Initializing nvtxEventAttributes for future compatibility +* \code +* nvtxResourceAttributes_t attribs = {0}; +* attribs.version = NVTX_VERSION; +* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE; +* \endcode +* +* \par Method 2: Initializing nvtxEventAttributes for a specific version +* \code +* nvtxResourceAttributes_v0 attribs = {0}; +* attribs.version = 2; +* attribs.size = (uint16_t)(sizeof(nvtxResourceAttributes_v0)); +* \endcode +* +* If the caller uses Method 1 it is critical that the entire binary +* layout of the structure be configured to 0 so that all fields +* are initialized to the default value. +* +* The caller should either use both NVTX_VERSION and +* NVTX_RESOURCE_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values +* and a versioned type (Method 2). Using a mix of the two methods +* will likely cause either source level incompatibility or binary +* incompatibility in the future. 
+* +* \par Settings Attribute Types and Values +* +* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain"); +* +* // Initialize +* nvtxResourceAttributes_t attribs = {0}; +* attribs.version = NVTX_VERSION; +* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE; +* +* // Configure the Attributes +* attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER; +* attribs.identifier.pValue = (const void*)pMutex; +* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII; +* attribs.message.ascii = "Single thread access to database."; +* +* nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, attribs); +* \endcode +* +* \sa +* ::nvtxDomainResourceCreate +*/ +typedef struct nvtxResourceAttributes_v0 +{ + /** + * \brief Version flag of the structure. + * + * Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs + * supported in this header file. This can optionally be overridden to + * another version of the tools extension library. + */ + uint16_t version; + + /** + * \brief Size of the structure. + * + * Needs to be set to the size in bytes of this attribute + * structure. + */ + uint16_t size; + + /** + * \brief Identifier type specifies how to interpret the identifier field + * + * Defines the identifier format of the attribute structure's \ref RESOURCE_IDENTIFIER_FIELD + * "identifier" field. + * + * Default Value is NVTX_RESOURCE_TYPE_UNKNOWN + */ + int32_t identifierType; /* values from enums following the pattern nvtxResource[name]Type_t */ + + /** + * \brief Identifier for the resource. + * \anchor RESOURCE_IDENTIFIER_FIELD + * + * An identifier may be a pointer or a handle to an OS or middleware API object. + * The resource type will assist in avoiding collisions where handles values may collide. + */ + union identifier_t + { + const void* pValue; + uint64_t ullValue; + } identifier; + + /** \brief Message type specified in this attribute structure. 
+ * + * Defines the message format of the attribute structure's \ref RESOURCE_MESSAGE_FIELD + * "message" field. + * + * Default Value is NVTX_MESSAGE_UNKNOWN + */ + int32_t messageType; /* nvtxMessageType_t */ + + /** \brief Message assigned to this attribute structure. \anchor RESOURCE_MESSAGE_FIELD + * + * The text message that is attached to a resource. + */ + nvtxMessageValue_t message; + +} nvtxResourceAttributes_v0; + +typedef struct nvtxResourceAttributes_v0 nvtxResourceAttributes_t; + +/* \cond SHOW_HIDDEN +* \version \NVTX_VERSION_2 +*/ +#define NVTX_RESOURCE_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxResourceAttributes_v0) ) ) +typedef struct nvtxResourceHandle* nvtxResourceHandle_t; +/** \endcond */ + + + +/* ------------------------------------------------------------------------- */ +/** \brief Create a resource object to track and associate data with OS and middleware objects +* +* Allows users to associate an API handle or pointer with a user-provided name. +* +* +* \param domain - Domain to own the resource object +* \param attribs - Attributes to be associated with the resource +* +* \return A handle that represents the newly created resource object. 
+* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain"); +* nvtxResourceAttributes_t attribs = {0}; +* attribs.version = NVTX_VERSION; +* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE; +* attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER; +* attribs.identifier.pValue = (const void*)pMutex; +* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII; +* attribs.message.ascii = "Single thread access to database."; +* nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, attribs); +* \endcode +* +* \sa +* ::nvtxResourceAttributes_t +* ::nvtxDomainResourceDestroy +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC nvtxResourceHandle_t NVTX_API nvtxDomainResourceCreate(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Destroy a resource object to track and associate data with OS and middleware objects +* +* Allows users to associate an API handle or pointer with a user-provided name. +* +* \param resource - Handle to the resource in which to operate. 
+* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain"); +* nvtxResourceAttributes_t attribs = {0}; +* attribs.version = NVTX_VERSION; +* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE; +* attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER; +* attribs.identifier.pValue = (const void*)pMutex; +* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII; +* attribs.message.ascii = "Single thread access to database."; +* nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, attribs); +* nvtxDomainResourceDestroy(handle); +* \endcode +* +* \sa +* ::nvtxDomainResourceCreate +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC void NVTX_API nvtxDomainResourceDestroy(nvtxResourceHandle_t resource); +/** @} */ + + +/** \name Functions for NVTX Category Naming*/ + +/* ------------------------------------------------------------------------- */ +/** +* \brief Annotate an NVTX category used within a domain. +* +* Categories are used to group sets of events. Each category is identified +* through a unique ID and that ID is passed into any of the marker/range +* events to assign that event to a specific category. The nvtxDomainNameCategory +* function calls allow the user to assign a name to a category ID that is +* specific to the domain. +* +* nvtxDomainNameCategory(NULL, category, name) is equivalent to calling +* nvtxNameCategory(category, name). +* +* \param domain - The domain of scoping the category. +* \param category - The category ID to name. +* \param name - The name of the category. +* +* \remarks The category names are tracked per domain. 
+* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("example"); +* nvtxDomainNameCategoryA(domain, 1, "Memory Allocation"); +* nvtxDomainNameCategoryW(domain, 2, L"Memory Transfer"); +* \endcode +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryA(nvtxDomainHandle_t domain, uint32_t category, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryW(nvtxDomainHandle_t domain, uint32_t category, const wchar_t* name); +/** @} */ + +/** \brief Annotate an NVTX category. + * + * Categories are used to group sets of events. Each category is identified + * through a unique ID and that ID is passed into any of the marker/range + * events to assign that event to a specific category. The nvtxNameCategory + * function calls allow the user to assign a name to a category ID. + * + * \param category - The category ID to name. + * \param name - The name of the category. + * + * \remarks The category names are tracked per process. + * + * \par Example: + * \code + * nvtxNameCategory(1, "Memory Allocation"); + * nvtxNameCategory(2, "Memory Transfer"); + * nvtxNameCategory(3, "Memory Object Lifetime"); + * \endcode + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCategoryA(uint32_t category, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCategoryW(uint32_t category, const wchar_t* name); +/** @} */ + +/** \name Functions for OS Threads Naming*/ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotate an OS thread. + * + * Allows the user to name an active thread of the current process. If an + * invalid thread ID is provided or a thread ID from a different process is + * used the behavior of the tool is implementation dependent. + * + * Tools expect thread ID to be a number that uniquely identifies the thread + * at the time of the call. Note that a thread's ID can be reused after + * it is destroyed. 
Tools may choose how to handle aliasing of thread IDs. + * + * POSIX pthread_t type returned by pthread_self() may not comply with these + * expectations. Please use OS-specific thread ID instead of pthread_t. + * + * The thread name is associated to the default domain. To support domains + * use resource objects via ::nvtxDomainResourceCreate. + * + * \param threadId - The ID of the thread to name. + * \param name - The name of the thread. + * + * \par Examples: + * MS Windows: + * \code + * #include + * nvtxNameOsThread(GetCurrentThreadId(), "Current thread"); + * nvtxNameOsThread(GetThreadId(SomeThreadHandle), "Other thread"); + * \endcode + * + * Android: + * \code + * #include + * nvtxNameOsThreadA(gettid(), "Current thread"); + * nvtxNameOsThreadA(getpid(), "Main thread"); + * \endcode + * + * Linux: + * \code + * #include + * nvtxNameOsThreadA(syscall(SYS_gettid), "Current thread"); + * \endcode + * \code + * #include + * nvtxNameOsThreadA(getpid(), "Main thread"); + * \endcode + * + * OS X: + * \code + * #include + * nvtxNameOsThreadA(syscall(SYS_thread_selfid), "Current thread"); + * \endcode + * \code + * #include + * __uint64_t id; + * pthread_threadid_np(pthread_self(), &id); + * nvtxNameOsThreadA(id, "Current thread"); + * pthread_threadid_np(somePThreadId, &id); + * nvtxNameOsThreadA(id, "Other thread"); + * \endcode + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadA(uint32_t threadId, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadW(uint32_t threadId, const wchar_t* name); +/** @} */ + + +/** @} */ /*END defgroup*/ +/* ========================================================================= */ +/** \defgroup STRING_REGISTRATION String Registration +* +* Registered strings are intended to increase performance by lowering instrumentation +* overhead. String may be registered once and the handle may be passed in place of +* a string where an the APIs may allow. 
+* +* See \ref STRING_REGISTRATION for more details +* +* @{ +*/ + +/* ------------------------------------------------------------------------- */ +/** \brief Register a string. + +* Registers an immutable string with NVTX. Once registered the pointer used +* to register the domain name can be used in nvtxEventAttributes_t +* \ref MESSAGE_FIELD. This allows NVTX implementation to skip copying the +* contents of the message on each event invocation. +* +* String registration is an optimization. It is recommended to use string +* registration if the string will be passed to an event many times. +* +* String are not unregistered, except that by unregistering the entire domain +* +* \param domain - Domain handle. If NULL then the global domain is used. +* \param string - A unique pointer to a sequence of characters. +* +* \return A handle representing the registered string. +* +* \par Example: +* \code +* nvtxDomainCreateA("com.nvidia.nvtx.example"); +* nvtxStringHandle_t message = nvtxDomainRegisterStringA(domain, "registered string"); +* nvtxEventAttributes_t eventAttrib = {0}; +* eventAttrib.version = NVTX_VERSION; +* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; +* eventAttrib.messageType = NVTX_MESSAGE_TYPE_REGISTERED; +* eventAttrib.message.registered = message; +* \endcode +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringA(nvtxDomainHandle_t domain, const char* string); +NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringW(nvtxDomainHandle_t domain, const wchar_t* string); +/** @} */ + +/** @} */ /*END defgroup*/ +/* ========================================================================= */ +/** \defgroup DOMAINS Domains +* +* Domains are used to group events to a developer defined scope. 
Middleware +* vendors may also scope their own events to avoid collisions with the +* application developer's events, so that the application developer may +* inspect both parts and easily differentiate or filter them. By default +* all events are scoped to a global domain where NULL is provided or when +* using APIs provided by versions of NVTX below v2 +* +* Domains are intended to be typically long lived objects with the intention +* of logically separating events of large modules from each other such as +* middleware libraries from each other and the main application. +* +* See \ref DOMAINS for more details +* +* @{ +*/ + +/* ------------------------------------------------------------------------- */ +/** \brief Register a NVTX domain. +* +* Domains are used to scope annotations. All NVTX_VERSION_0 and NVTX_VERSION_1 +* annotations are scoped to the global domain. The function nvtxDomainCreate +* creates a new named domain. +* +* Each domain maintains its own nvtxRangePush and nvtxRangePop stack. +* +* \param name - A unique string representing the domain. +* +* \return A handle representing the domain. 
+* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example"); +* +* nvtxMarkA("nvtxMarkA to global domain"); +* +* nvtxEventAttributes_t eventAttrib1 = {0}; +* eventAttrib1.version = NVTX_VERSION; +* eventAttrib1.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; +* eventAttrib1.message.ascii = "nvtxDomainMarkEx to global domain"; +* nvtxDomainMarkEx(NULL, &eventAttrib1); +* +* nvtxEventAttributes_t eventAttrib2 = {0}; +* eventAttrib2.version = NVTX_VERSION; +* eventAttrib2.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; +* eventAttrib2.message.ascii = "nvtxDomainMarkEx to com.nvidia.nvtx.example"; +* nvtxDomainMarkEx(domain, &eventAttrib2); +* nvtxDomainDestroy(domain); +* \endcode +* +* \sa +* ::nvtxDomainDestroy +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateA(const char* name); +NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateW(const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Unregister a NVTX domain. +* +* Unregisters the domain handle and frees all domain specific resources. 
+* +* \param domain - the domain handle +* +* \par Example: +* \code +* nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example"); +* nvtxDomainDestroy(domain); +* \endcode +* +* \sa +* ::nvtxDomainCreateA +* ::nvtxDomainCreateW +* +* \version \NVTX_VERSION_2 +* @{ */ +NVTX_DECLSPEC void NVTX_API nvtxDomainDestroy(nvtxDomainHandle_t domain); +/** @} */ + + +/** @} */ /*END defgroup*/ +/* ========================================================================= */ +/** \cond SHOW_HIDDEN */ + +#ifdef UNICODE + #define nvtxMark nvtxMarkW + #define nvtxRangeStart nvtxRangeStartW + #define nvtxRangePush nvtxRangePushW + #define nvtxNameCategory nvtxNameCategoryW + #define nvtxNameOsThread nvtxNameOsThreadW + /* NVTX_VERSION_2 */ + #define nvtxDomainCreate nvtxDomainCreateW + #define nvtxDomainRegisterString nvtxDomainRegisterStringW + #define nvtxDomainNameCategory nvtxDomainNameCategoryW +#else + #define nvtxMark nvtxMarkA + #define nvtxRangeStart nvtxRangeStartA + #define nvtxRangePush nvtxRangePushA + #define nvtxNameCategory nvtxNameCategoryA + #define nvtxNameOsThread nvtxNameOsThreadA + /* NVTX_VERSION_2 */ + #define nvtxDomainCreate nvtxDomainCreateA + #define nvtxDomainRegisterString nvtxDomainRegisterStringA + #define nvtxDomainNameCategory nvtxDomainNameCategoryA +#endif + +/** \endcond */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#define NVTX_IMPL_GUARD /* Ensure other headers cannot included directly */ + +#include "nvtxDetail/nvtxTypes.h" + +#ifndef NVTX_NO_IMPL +#include "nvtxDetail/nvtxImpl.h" +#endif /*NVTX_NO_IMPL*/ + +#undef NVTX_IMPL_GUARD + +#endif /* !defined(NVTX_VERSION) */ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtCuda.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtCuda.h new file mode 100644 index 
0000000000000000000000000000000000000000..8b1e71b7779089430b39e0fe3483ad536bb4bb7d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtCuda.h @@ -0,0 +1,170 @@ +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. 
+* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. +*/ + +#include "nvToolsExt.h" + +#include "cuda.h" + +#ifndef NVTOOLSEXT_CUDA_V3 +#define NVTOOLSEXT_CUDA_V3 + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* ========================================================================= */ +/** \name Functions for CUDA Resource Naming +*/ +/** \addtogroup RESOURCE_NAMING + * \section RESOURCE_NAMING_CUDA CUDA Resource Naming + * + * This section covers the API functions that allow to annotate CUDA resources + * with user-provided names. + * + * @{ + */ + +/* ------------------------------------------------------------------------- */ +/* \cond SHOW_HIDDEN +* \brief Used to build a non-colliding value for resource types separated class +* \version \NVTX_VERSION_2 +*/ +#define NVTX_RESOURCE_CLASS_CUDA 4 +/** \endcond */ + +/* ------------------------------------------------------------------------- */ +/** \brief Resource types for CUDA +*/ +typedef enum nvtxResourceCUDAType_t +{ + NVTX_RESOURCE_TYPE_CUDA_DEVICE = NVTX_RESOURCE_MAKE_TYPE(CUDA, 1), /* CUdevice */ + NVTX_RESOURCE_TYPE_CUDA_CONTEXT = NVTX_RESOURCE_MAKE_TYPE(CUDA, 2), /* CUcontext */ + NVTX_RESOURCE_TYPE_CUDA_STREAM = NVTX_RESOURCE_MAKE_TYPE(CUDA, 3), /* CUstream */ + NVTX_RESOURCE_TYPE_CUDA_EVENT = NVTX_RESOURCE_MAKE_TYPE(CUDA, 4), /* CUevent */ +} nvtxResourceCUDAType_t; + + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA device. + * + * Allows the user to associate a CUDA device with a user-provided name. + * + * \param device - The handle of the CUDA device to name. + * \param name - The name of the CUDA device. 
+ * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceA(CUdevice device, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceW(CUdevice device, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA context. + * + * Allows the user to associate a CUDA context with a user-provided name. + * + * \param context - The handle of the CUDA context to name. + * \param name - The name of the CUDA context. + * + * \par Example: + * \code + * CUresult status = cuCtxCreate( &cuContext, 0, cuDevice ); + * if ( CUDA_SUCCESS != status ) + * goto Error; + * nvtxNameCuContext(cuContext, "CTX_NAME"); + * \endcode + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCuContextA(CUcontext context, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCuContextW(CUcontext context, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA stream. + * + * Allows the user to associate a CUDA stream with a user-provided name. + * + * \param stream - The handle of the CUDA stream to name. + * \param name - The name of the CUDA stream. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamA(CUstream stream, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamW(CUstream stream, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA event. + * + * Allows the user to associate a CUDA event with a user-provided name. + * + * \param event - The handle of the CUDA event to name. + * \param name - The name of the CUDA event. 
+ * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCuEventA(CUevent event, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCuEventW(CUevent event, const wchar_t* name); +/** @} */ + +/** @} */ /* END RESOURCE_NAMING */ + +/* ========================================================================= */ +#ifdef UNICODE + #define nvtxNameCuDevice nvtxNameCuDeviceW + #define nvtxNameCuContext nvtxNameCuContextW + #define nvtxNameCuStream nvtxNameCuStreamW + #define nvtxNameCuEvent nvtxNameCuEventW +#else + #define nvtxNameCuDevice nvtxNameCuDeviceA + #define nvtxNameCuContext nvtxNameCuContextA + #define nvtxNameCuStream nvtxNameCuStreamA + #define nvtxNameCuEvent nvtxNameCuEventA +#endif + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#ifndef NVTX_NO_IMPL +#define NVTX_IMPL_GUARD_CUDA /* Ensure other headers cannot included directly */ +#include "nvtxDetail/nvtxImplCuda_v3.h" +#undef NVTX_IMPL_GUARD_CUDA +#endif /*NVTX_NO_IMPL*/ + +#endif /* NVTOOLSEXT_CUDA_V3 */ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtCudaRt.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtCudaRt.h new file mode 100644 index 0000000000000000000000000000000000000000..044d3dab5c32e7ec6646f3a1f3e9d89e52d40ba2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtCudaRt.h @@ -0,0 +1,146 @@ +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. 
+* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. 
+*/ + +#include "nvToolsExt.h" + +#include "cuda.h" +#include "driver_types.h" + +#ifndef NVTOOLSEXT_CUDART_V3 +#define NVTOOLSEXT_CUDART_V3 + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* ========================================================================= */ +/** \name Functions for CUDA Resource Naming +*/ +/** \addtogroup RESOURCE_NAMING + * \section RESOURCE_NAMING_CUDART CUDA Runtime Resource Naming + * + * This section covers the API functions that allow to annotate CUDA resources + * with user-provided names. + * + * @{ + */ + +/* ------------------------------------------------------------------------- */ +/* \cond SHOW_HIDDEN +* \brief Used to build a non-colliding value for resource types separated class +* \version \NVTX_VERSION_2 +*/ +#define NVTX_RESOURCE_CLASS_CUDART 5 +/** \endcond */ + +/* ------------------------------------------------------------------------- */ +/** \brief Resource types for CUDART +*/ +typedef enum nvtxResourceCUDARTType_t +{ + NVTX_RESOURCE_TYPE_CUDART_DEVICE = NVTX_RESOURCE_MAKE_TYPE(CUDART, 0), /* int device */ + NVTX_RESOURCE_TYPE_CUDART_STREAM = NVTX_RESOURCE_MAKE_TYPE(CUDART, 1), /* cudaStream_t */ + NVTX_RESOURCE_TYPE_CUDART_EVENT = NVTX_RESOURCE_MAKE_TYPE(CUDART, 2), /* cudaEvent_t */ +} nvtxResourceCUDARTType_t; + + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA device. + * + * Allows the user to associate a CUDA device with a user-provided name. + * + * \param device - The id of the CUDA device to name. + * \param name - The name of the CUDA device. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceA(int device, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceW(int device, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA stream. 
+ * + * Allows the user to associate a CUDA stream with a user-provided name. + * + * \param stream - The handle of the CUDA stream to name. + * \param name - The name of the CUDA stream. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamA(cudaStream_t stream, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamW(cudaStream_t stream, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates a CUDA event. + * + * Allows the user to associate a CUDA event with a user-provided name. + * + * \param event - The handle of the CUDA event to name. + * \param name - The name of the CUDA event. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventA(cudaEvent_t event, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventW(cudaEvent_t event, const wchar_t* name); +/** @} */ + +/** @} */ /* END RESOURCE_NAMING */ + +/* ========================================================================= */ +#ifdef UNICODE + #define nvtxNameCudaDevice nvtxNameCudaDeviceW + #define nvtxNameCudaStream nvtxNameCudaStreamW + #define nvtxNameCudaEvent nvtxNameCudaEventW +#else + #define nvtxNameCudaDevice nvtxNameCudaDeviceA + #define nvtxNameCudaStream nvtxNameCudaStreamA + #define nvtxNameCudaEvent nvtxNameCudaEventA +#endif + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#ifndef NVTX_NO_IMPL +#define NVTX_IMPL_GUARD_CUDART /* Ensure other headers cannot included directly */ +#include "nvtxDetail/nvtxImplCudaRt_v3.h" +#undef NVTX_IMPL_GUARD_CUDART +#endif /*NVTX_NO_IMPL*/ + +#endif /* NVTOOLSEXT_CUDART_V3 */ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtOpenCL.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtOpenCL.h new file mode 100644 index 
0000000000000000000000000000000000000000..38a9290b438d30f8450b6ea2f4e5de68d37d4a97 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtOpenCL.h @@ -0,0 +1,220 @@ +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. 
+* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. +*/ + +#include "nvToolsExt.h" + +#include <CL/cl.h> + +#ifndef NVTOOLSEXT_OPENCL_V3 +#define NVTOOLSEXT_OPENCL_V3 + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* ========================================================================= */ +/** \name Functions for OpenCL Resource Naming + */ +/** \addtogroup RESOURCE_NAMING + * \section RESOURCE_NAMING_OPENCL OpenCL Resource Naming + * + * This section covers the API functions that allow to annotate OpenCL resources + * with user-provided names. + * + * @{ + */ + +/* ------------------------------------------------------------------------- */ +/* \cond SHOW_HIDDEN +* \brief Used to build a non-colliding value for resource types separated class +* \version \NVTX_VERSION_2 +*/ +#define NVTX_RESOURCE_CLASS_OPENCL 6 +/** \endcond */ + +/* ------------------------------------------------------------------------- */ +/** \brief Resource types for OpenCL +*/ +typedef enum nvtxResourceOpenCLType_t +{ + NVTX_RESOURCE_TYPE_OPENCL_DEVICE = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 1), + NVTX_RESOURCE_TYPE_OPENCL_CONTEXT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 2), + NVTX_RESOURCE_TYPE_OPENCL_COMMANDQUEUE = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 3), + NVTX_RESOURCE_TYPE_OPENCL_MEMOBJECT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 4), + NVTX_RESOURCE_TYPE_OPENCL_SAMPLER = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 5), + NVTX_RESOURCE_TYPE_OPENCL_PROGRAM = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 6), + NVTX_RESOURCE_TYPE_OPENCL_EVENT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 7), +} nvtxResourceOpenCLType_t; + + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates an OpenCL device. + * + * Allows to associate an OpenCL device with a user-provided name. + * + * \param device - The handle of the OpenCL device to name. 
+ * \param name - The name of the OpenCL device. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceA(cl_device_id device, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceW(cl_device_id device, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates an OpenCL context. + * + * Allows to associate an OpenCL context with a user-provided name. + * + * \param context - The handle of the OpenCL context to name. + * \param name - The name of the OpenCL context. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameClContextA(cl_context context, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameClContextW(cl_context context, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates an OpenCL command queue. + * + * Allows to associate an OpenCL command queue with a user-provided name. + * + * \param command_queue - The handle of the OpenCL command queue to name. + * \param name - The name of the OpenCL command queue. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueA(cl_command_queue command_queue, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueW(cl_command_queue command_queue, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates an OpenCL memory object. + * + * Allows to associate an OpenCL memory object with a user-provided name. + * + * \param memobj - The handle of the OpenCL memory object to name. + * \param name - The name of the OpenCL memory object. 
+ * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectA(cl_mem memobj, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectW(cl_mem memobj, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates an OpenCL sampler. + * + * Allows to associate an OpenCL sampler with a user-provided name. + * + * \param sampler - The handle of the OpenCL sampler to name. + * \param name - The name of the OpenCL sampler. + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerA(cl_sampler sampler, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerW(cl_sampler sampler, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates an OpenCL program. + * + * Allows to associate an OpenCL program with a user-provided name. + * + * \param program - The handle of the OpenCL program to name. + * \param name - The name of the OpenCL program. + * + * \code + * cpProgram = clCreateProgramWithSource(cxGPUContext, 1, + * (const char **) &cSourceCL, &program_length, &ciErrNum); + * shrCheckErrorEX(ciErrNum, CL_SUCCESS, pCleanup); + * nvtxNameClProgram(cpProgram, L"PROGRAM_NAME"); + * \endcode + * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameClProgramA(cl_program program, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameClProgramW(cl_program program, const wchar_t* name); +/** @} */ + +/* ------------------------------------------------------------------------- */ +/** \brief Annotates an OpenCL event. + * + * Allows to associate an OpenCL event with a user-provided name. + * + * \param evnt - The handle of the OpenCL event to name. + * \param name - The name of the OpenCL event. 
+ * + * \version \NVTX_VERSION_1 + * @{ */ +NVTX_DECLSPEC void NVTX_API nvtxNameClEventA(cl_event evnt, const char* name); +NVTX_DECLSPEC void NVTX_API nvtxNameClEventW(cl_event evnt, const wchar_t* name); +/** @} */ + +/** @} */ /* END RESOURCE_NAMING */ + +/* ========================================================================= */ +#ifdef UNICODE + #define nvtxNameClDevice nvtxNameClDeviceW + #define nvtxNameClContext nvtxNameClContextW + #define nvtxNameClCommandQueue nvtxNameClCommandQueueW + #define nvtxNameClMemObject nvtxNameClMemObjectW + #define nvtxNameClSampler nvtxNameClSamplerW + #define nvtxNameClProgram nvtxNameClProgramW + #define nvtxNameClEvent nvtxNameClEventW +#else + #define nvtxNameClDevice nvtxNameClDeviceA + #define nvtxNameClContext nvtxNameClContextA + #define nvtxNameClCommandQueue nvtxNameClCommandQueueA + #define nvtxNameClMemObject nvtxNameClMemObjectA + #define nvtxNameClSampler nvtxNameClSamplerA + #define nvtxNameClProgram nvtxNameClProgramA + #define nvtxNameClEvent nvtxNameClEventA +#endif + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#ifndef NVTX_NO_IMPL +#define NVTX_IMPL_GUARD_OPENCL /* Ensure other headers cannot included directly */ +#include "nvtxDetail/nvtxImplOpenCL_v3.h" +#undef NVTX_IMPL_GUARD_OPENCL +#endif /*NVTX_NO_IMPL*/ + +#endif /* NVTOOLSEXT_OPENCL_V3 */ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtSync.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtSync.h new file mode 100644 index 0000000000000000000000000000000000000000..afc3db98fe1b05d8dfb309221243ae3b4c14dd9d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtSync.h @@ -0,0 +1,411 @@ +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. 
+* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. 
+*/ + +#include "nvToolsExt.h" + +#ifndef NVTOOLSEXT_SYNC_V3 +#define NVTOOLSEXT_SYNC_V3 + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* \cond SHOW_HIDDEN +* \version \NVTX_VERSION_2 +*/ +#define NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxSyncUserAttributes_v0) ) ) +/** \endcond */ + + +/** +* \page PAGE_SYNCHRONIZATION Synchronization +* +* This section covers a subset of the API that allow users to track additional +* synchronization details of their application. Naming OS synchronization primitives +* may allow users to better understand the data collected by traced synchronization +* APIs. Additionally, a user defined synchronization object can allow the users +* to tell the tools when the user is building their own synchronization system +* that does not rely on the OS to provide behaviors and instead use techniques like +* atomic operations and spinlocks. +* +* See module \ref SYNCHRONIZATION for details. +* +* \par Example: +* \code +* class MyMutex +* { +* volatile long bLocked; +* nvtxSyncUser_t hSync; +* public: +* MyMutex(const char* name, nvtxDomainHandle_t d){ +* bLocked = 0; +* +* nvtxSyncUserAttributes_t attribs = { 0 }; +* attribs.version = NVTX_VERSION; +* attribs.size = NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE; +* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII; +* attribs.message.ascii = name; +* hSync = nvtxDomainSyncUserCreate(d, &attribs); +* } +* +* ~MyMutex() { +* nvtxDomainSyncUserDestroy(hSync); +* } +* +* bool Lock() { +* nvtxDomainSyncUserAcquireStart(hSync); +* bool acquired = __sync_bool_compare_and_swap(&bLocked, 0, 1);//atomic compiler intrinsic + +* if (acquired) { +* nvtxDomainSyncUserAcquireSuccess(hSync); +* } +* else { +* nvtxDomainSyncUserAcquireFailed(hSync); +* } +* return acquired; +* } + +* void Unlock() { +* nvtxDomainSyncUserReleasing(hSync); +* bLocked = false; +* } +* }; +* \endcode +* +* \version \NVTX_VERSION_2 +*/ + +/* 
------------------------------------------------------------------------- */ +/* \cond SHOW_HIDDEN +* \brief Used to build a non-colliding value for resource types separated class +* \version \NVTX_VERSION_2 +*/ +#define NVTX_RESOURCE_CLASS_SYNC_OS 2 /**< Synchronization objects that are OS specific. */ +#define NVTX_RESOURCE_CLASS_SYNC_PTHREAD 3 /**< Synchronization objects that are from the POSIX Threads API (pthread)*/ +/** \endcond */ + + +/* ------------------------------------------------------------------------- */ +/** \defgroup SYNCHRONIZATION Synchronization +* See page \ref PAGE_SYNCHRONIZATION. +* @{ +*/ + +/** \brief Resource type values for OSs with POSIX Thread API support + */ +typedef enum nvtxResourceSyncPosixThreadType_t +{ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_MUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 1), /* pthread_mutex_t */ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_CONDITION = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 2), /* pthread_cond_t */ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_RWLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 3), /* pthread_rwlock_t */ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_BARRIER = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 4), /* pthread_barrier_t */ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_SPINLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 5), /* pthread_spinlock_t */ + NVTX_RESOURCE_TYPE_SYNC_PTHREAD_ONCE = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 6) /* pthread_once_t */ +} nvtxResourceSyncPosixThreadType_t; + +/** \brief Resource type values for Windows OSs +*/ +typedef enum nvtxResourceSyncWindowsType_t +{ + NVTX_RESOURCE_TYPE_SYNC_WINDOWS_MUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 1), + NVTX_RESOURCE_TYPE_SYNC_WINDOWS_SEMAPHORE = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 2), + NVTX_RESOURCE_TYPE_SYNC_WINDOWS_EVENT = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 3), + NVTX_RESOURCE_TYPE_SYNC_WINDOWS_CRITICAL_SECTION = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 4), + NVTX_RESOURCE_TYPE_SYNC_WINDOWS_SRWLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 5) +} nvtxResourceSyncWindowsType_t; + +/** \brief 
Resource type values for Linux and Linux derived OSs such as Android +* \sa +* ::nvtxResourceSyncPosixThreadType_t +*/ +typedef enum nvtxResourceSyncLinuxType_t +{ + NVTX_RESOURCE_TYPE_SYNC_LINUX_MUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 1), + NVTX_RESOURCE_TYPE_SYNC_LINUX_FUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 2), + NVTX_RESOURCE_TYPE_SYNC_LINUX_SEMAPHORE = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 3), + NVTX_RESOURCE_TYPE_SYNC_LINUX_COMPLETION = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 4), + NVTX_RESOURCE_TYPE_SYNC_LINUX_SPINLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 5), + NVTX_RESOURCE_TYPE_SYNC_LINUX_SEQLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 6), + NVTX_RESOURCE_TYPE_SYNC_LINUX_RCU = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 7) +} nvtxResourceSyncLinuxType_t; + +/** \brief Resource type values for Android come from Linux. +* \sa +* ::nvtxResourceSyncLinuxType_t +* ::nvtxResourceSyncPosixThreadType_t +*/ +typedef enum nvtxResourceSyncLinuxType_t nvtxResourceSyncAndroidType_t; + +/** \brief User Defined Synchronization Object Handle. +* \anchor SYNCUSER_HANDLE_STRUCTURE +* +* This structure is opaque to the user and is used as a handle to reference +* a user defined synchronization object. The tools will return a pointer through the API for the application +* to hold on its behalf to reference the string in the future. +* +*/ +typedef struct nvtxSyncUser* nvtxSyncUser_t; + +/** \brief User Defined Synchronization Object Attributes Structure. +* \anchor USERDEF_SYNC_ATTRIBUTES_STRUCTURE +* +* This structure is used to describe the attributes of a user defined synchronization +* object. The layout of the structure is defined by a specific version of the tools +* extension library and can change between different versions of the Tools Extension +* library. +* +* \par Initializing the Attributes +* +* The caller should always perform the following three tasks when using +* attributes: +*
 <ul>
+*    <li>Zero the structure
+*    <li>Set the version field
+*    <li>Set the size field
+* </ul>
+* +* Zeroing the structure sets all the event attributes types and values +* to the default value. +* +* The version and size field are used by the Tools Extension +* implementation to handle multiple versions of the attributes structure. +* +* It is recommended that the caller use one of the following to methods +* to initialize the event attributes structure: +* +* \par Method 1: Initializing nvtxEventAttributes for future compatibility +* \code +* nvtxSyncUserAttributes_t attribs = {0}; +* attribs.version = NVTX_VERSION; +* attribs.size = NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE; +* \endcode +* +* \par Method 2: Initializing nvtxSyncUserAttributes_t for a specific version +* \code +* nvtxSyncUserAttributes_t attribs = {0}; +* attribs.version = 1; +* attribs.size = (uint16_t)(sizeof(nvtxSyncUserAttributes_t)); +* \endcode +* +* If the caller uses Method 1 it is critical that the entire binary +* layout of the structure be configured to 0 so that all fields +* are initialized to the default value. +* +* The caller should either use both NVTX_VERSION and +* NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values +* and a versioned type (Method 2). Using a mix of the two methods +* will likely cause either source level incompatibility or binary +* incompatibility in the future. +* +* \par Settings Attribute Types and Values +* +* +* \par Example: +* \code +* // Initialize +* nvtxSyncUserAttributes_t attribs = {0}; +* attribs.version = NVTX_VERSION; +* attribs.size = NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE; +* +* // Configure the Attributes +* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII; +* attribs.message.ascii = "Example"; +* \endcode +* +* \sa +* ::nvtxDomainSyncUserCreate +*/ +typedef struct nvtxSyncUserAttributes_v0 +{ + /** + * \brief Version flag of the structure. + * + * Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs + * supported in this header file. This can optionally be overridden to + * another version of the tools extension library. 
+ */ + uint16_t version; + + /** + * \brief Size of the structure. + * + * Needs to be set to the size in bytes of the event attribute + * structure used to specify the event. + */ + uint16_t size; + + /** \brief Message type specified in this attribute structure. + * + * Defines the message format of the attribute structure's \ref nvtxSyncUserAttributes_v0::message + * "message" field. + * + * Default Value is NVTX_MESSAGE_UNKNOWN + */ + int32_t messageType; /* nvtxMessageType_t */ + + /** \brief Message assigned to this attribute structure. + * + * The text message that is attached to an event. + */ + nvtxMessageValue_t message; + +} nvtxSyncUserAttributes_v0; + +typedef struct nvtxSyncUserAttributes_v0 nvtxSyncUserAttributes_t; + +/* ------------------------------------------------------------------------- */ +/** \brief Create a user defined synchronization object +* This is used to track non-OS synchronization working with spinlocks and atomics +* +* \param domain - Domain to own the resource +* \param attribs - A structure to assign multiple attributes to the object. +* +* \return A handle that represents the newly created user defined synchronization object. +* +* \sa +* ::nvtxDomainSyncUserCreate +* ::nvtxDomainSyncUserDestroy +* ::nvtxDomainSyncUserAcquireStart +* ::nvtxDomainSyncUserAcquireFailed +* ::nvtxDomainSyncUserAcquireSuccess +* ::nvtxDomainSyncUserReleasing +* +* \version \NVTX_VERSION_2 +*/ +NVTX_DECLSPEC nvtxSyncUser_t NVTX_API nvtxDomainSyncUserCreate(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs); + +/* ------------------------------------------------------------------------- */ +/** \brief Destroy a user defined synchronization object +* This is used to track non-OS synchronization working with spinlocks and atomics +* +* \param handle - A handle to the object to operate on. 
+* +* \sa +* ::nvtxDomainSyncUserCreate +* ::nvtxDomainSyncUserDestroy +* ::nvtxDomainSyncUserAcquireStart +* ::nvtxDomainSyncUserAcquireFailed +* ::nvtxDomainSyncUserAcquireSuccess +* ::nvtxDomainSyncUserReleasing +* +* \version \NVTX_VERSION_2 +*/ +NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserDestroy(nvtxSyncUser_t handle); + +/* ------------------------------------------------------------------------- */ +/** \brief Signal to tools that an attempt to acquire a user defined synchronization object +* +* \param handle - A handle to the object to operate on. +* +* \sa +* ::nvtxDomainSyncUserCreate +* ::nvtxDomainSyncUserDestroy +* ::nvtxDomainSyncUserAcquireStart +* ::nvtxDomainSyncUserAcquireFailed +* ::nvtxDomainSyncUserAcquireSuccess +* ::nvtxDomainSyncUserReleasing +* +* \version \NVTX_VERSION_2 +*/ +NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireStart(nvtxSyncUser_t handle); + +/* ------------------------------------------------------------------------- */ +/** \brief Signal to tools of failure in acquiring a user defined synchronization object +* This should be called after \ref nvtxDomainSyncUserAcquireStart +* +* \param handle - A handle to the object to operate on. +* +* \sa +* ::nvtxDomainSyncUserCreate +* ::nvtxDomainSyncUserDestroy +* ::nvtxDomainSyncUserAcquireStart +* ::nvtxDomainSyncUserAcquireFailed +* ::nvtxDomainSyncUserAcquireSuccess +* ::nvtxDomainSyncUserReleasing +* +* \version \NVTX_VERSION_2 +*/NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireFailed(nvtxSyncUser_t handle); + +/* ------------------------------------------------------------------------- */ +/** \brief Signal to tools of success in acquiring a user defined synchronization object +* This should be called after \ref nvtxDomainSyncUserAcquireStart. +* +* \param handle - A handle to the object to operate on. 
+* +* \sa +* ::nvtxDomainSyncUserCreate +* ::nvtxDomainSyncUserDestroy +* ::nvtxDomainSyncUserAcquireStart +* ::nvtxDomainSyncUserAcquireFailed +* ::nvtxDomainSyncUserAcquireSuccess +* ::nvtxDomainSyncUserReleasing +* +* \version \NVTX_VERSION_2 +*/NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireSuccess(nvtxSyncUser_t handle); + +/* ------------------------------------------------------------------------- */ +/** \brief Signal to tools of releasing a reservation on user defined synchronization object +* This should be called after \ref nvtxDomainSyncUserAcquireSuccess. +* +* \param handle - A handle to the object to operate on. +* +* \sa +* ::nvtxDomainSyncUserCreate +* ::nvtxDomainSyncUserDestroy +* ::nvtxDomainSyncUserAcquireStart +* ::nvtxDomainSyncUserAcquireFailed +* ::nvtxDomainSyncUserAcquireSuccess +* ::nvtxDomainSyncUserReleasing +* +* \version \NVTX_VERSION_2 +*/ +NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserReleasing(nvtxSyncUser_t handle); + + +/** @} */ /*END defgroup*/ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#ifndef NVTX_NO_IMPL +#define NVTX_IMPL_GUARD_SYNC /* Ensure other headers cannot included directly */ +#include "nvtxDetail/nvtxImplSync_v3.h" +#undef NVTX_IMPL_GUARD_SYNC +#endif /*NVTX_NO_IMPL*/ + +#endif /* NVTOOLSEXT_SYNC_V3 */ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImpl.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..bdc6bd4c72d74ef0f1fe5c2c8b4c49b196927fa7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImpl.h @@ -0,0 +1,469 @@ +/* This file was procedurally generated! Do not modify this file by hand. */ + +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. 
+* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. +*/ + +#ifndef NVTX_IMPL_GUARD +#error Never include this file directly -- it is automatically included by nvToolsExt.h (except when NVTX_NO_IMPL is defined). 
+#endif + +/* ---- Include required platform headers ---- */ + +#if defined(_WIN32) + +#include + +#else +#include + +#if defined(__ANDROID__) +#include +#endif + +#if defined(__linux__) || defined(__CYGWIN__) +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#endif + +/* ---- Define macros used in this file ---- */ + +#define NVTX_INIT_STATE_FRESH 0 +#define NVTX_INIT_STATE_STARTED 1 +#define NVTX_INIT_STATE_COMPLETE 2 + +#ifdef NVTX_DEBUG_PRINT +#ifdef __ANDROID__ +#include +#define NVTX_ERR(...) __android_log_print(ANDROID_LOG_ERROR, "NVTOOLSEXT", __VA_ARGS__); +#define NVTX_INFO(...) __android_log_print(ANDROID_LOG_INFO, "NVTOOLSEXT", __VA_ARGS__); +#else +#include +#define NVTX_ERR(...) fprintf(stderr, "NVTX_ERROR: " __VA_ARGS__) +#define NVTX_INFO(...) fprintf(stderr, "NVTX_INFO: " __VA_ARGS__) +#endif +#else /* !defined(NVTX_DEBUG_PRINT) */ +#define NVTX_ERR(...) +#define NVTX_INFO(...) 
+#endif + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#ifdef __GNUC__ +#pragma GCC visibility push(hidden) +#endif + +/* ---- Forward declare all functions referenced in globals ---- */ + +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(void); +NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxEtiGetModuleFunctionTable)( + NvtxCallbackModule module, + NvtxFunctionTable* out_table, + unsigned int* out_size); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxEtiSetInjectionNvtxVersion)( + uint32_t version); +NVTX_LINKONCE_FWDDECL_FUNCTION const void* NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxGetExportTable)( + uint32_t exportTableId); + +#include "nvtxInitDecls.h" + +/* ---- Define all globals ---- */ + +typedef struct nvtxGlobals_t +{ + volatile unsigned int initState; + NvtxExportTableCallbacks etblCallbacks; + NvtxExportTableVersionInfo etblVersionInfo; + + /* Implementation function pointers */ + nvtxMarkEx_impl_fntype nvtxMarkEx_impl_fnptr; + nvtxMarkA_impl_fntype nvtxMarkA_impl_fnptr; + nvtxMarkW_impl_fntype nvtxMarkW_impl_fnptr; + nvtxRangeStartEx_impl_fntype nvtxRangeStartEx_impl_fnptr; + nvtxRangeStartA_impl_fntype nvtxRangeStartA_impl_fnptr; + nvtxRangeStartW_impl_fntype nvtxRangeStartW_impl_fnptr; + nvtxRangeEnd_impl_fntype nvtxRangeEnd_impl_fnptr; + nvtxRangePushEx_impl_fntype nvtxRangePushEx_impl_fnptr; + nvtxRangePushA_impl_fntype nvtxRangePushA_impl_fnptr; + nvtxRangePushW_impl_fntype nvtxRangePushW_impl_fnptr; + nvtxRangePop_impl_fntype nvtxRangePop_impl_fnptr; + nvtxNameCategoryA_impl_fntype nvtxNameCategoryA_impl_fnptr; + nvtxNameCategoryW_impl_fntype nvtxNameCategoryW_impl_fnptr; + nvtxNameOsThreadA_impl_fntype nvtxNameOsThreadA_impl_fnptr; + nvtxNameOsThreadW_impl_fntype nvtxNameOsThreadW_impl_fnptr; + + nvtxNameCuDeviceA_fakeimpl_fntype nvtxNameCuDeviceA_impl_fnptr; + nvtxNameCuDeviceW_fakeimpl_fntype nvtxNameCuDeviceW_impl_fnptr; + 
nvtxNameCuContextA_fakeimpl_fntype nvtxNameCuContextA_impl_fnptr; + nvtxNameCuContextW_fakeimpl_fntype nvtxNameCuContextW_impl_fnptr; + nvtxNameCuStreamA_fakeimpl_fntype nvtxNameCuStreamA_impl_fnptr; + nvtxNameCuStreamW_fakeimpl_fntype nvtxNameCuStreamW_impl_fnptr; + nvtxNameCuEventA_fakeimpl_fntype nvtxNameCuEventA_impl_fnptr; + nvtxNameCuEventW_fakeimpl_fntype nvtxNameCuEventW_impl_fnptr; + + nvtxNameClDeviceA_fakeimpl_fntype nvtxNameClDeviceA_impl_fnptr; + nvtxNameClDeviceW_fakeimpl_fntype nvtxNameClDeviceW_impl_fnptr; + nvtxNameClContextA_fakeimpl_fntype nvtxNameClContextA_impl_fnptr; + nvtxNameClContextW_fakeimpl_fntype nvtxNameClContextW_impl_fnptr; + nvtxNameClCommandQueueA_fakeimpl_fntype nvtxNameClCommandQueueA_impl_fnptr; + nvtxNameClCommandQueueW_fakeimpl_fntype nvtxNameClCommandQueueW_impl_fnptr; + nvtxNameClMemObjectA_fakeimpl_fntype nvtxNameClMemObjectA_impl_fnptr; + nvtxNameClMemObjectW_fakeimpl_fntype nvtxNameClMemObjectW_impl_fnptr; + nvtxNameClSamplerA_fakeimpl_fntype nvtxNameClSamplerA_impl_fnptr; + nvtxNameClSamplerW_fakeimpl_fntype nvtxNameClSamplerW_impl_fnptr; + nvtxNameClProgramA_fakeimpl_fntype nvtxNameClProgramA_impl_fnptr; + nvtxNameClProgramW_fakeimpl_fntype nvtxNameClProgramW_impl_fnptr; + nvtxNameClEventA_fakeimpl_fntype nvtxNameClEventA_impl_fnptr; + nvtxNameClEventW_fakeimpl_fntype nvtxNameClEventW_impl_fnptr; + + nvtxNameCudaDeviceA_impl_fntype nvtxNameCudaDeviceA_impl_fnptr; + nvtxNameCudaDeviceW_impl_fntype nvtxNameCudaDeviceW_impl_fnptr; + nvtxNameCudaStreamA_fakeimpl_fntype nvtxNameCudaStreamA_impl_fnptr; + nvtxNameCudaStreamW_fakeimpl_fntype nvtxNameCudaStreamW_impl_fnptr; + nvtxNameCudaEventA_fakeimpl_fntype nvtxNameCudaEventA_impl_fnptr; + nvtxNameCudaEventW_fakeimpl_fntype nvtxNameCudaEventW_impl_fnptr; + + nvtxDomainMarkEx_impl_fntype nvtxDomainMarkEx_impl_fnptr; + nvtxDomainRangeStartEx_impl_fntype nvtxDomainRangeStartEx_impl_fnptr; + nvtxDomainRangeEnd_impl_fntype nvtxDomainRangeEnd_impl_fnptr; + 
nvtxDomainRangePushEx_impl_fntype nvtxDomainRangePushEx_impl_fnptr; + nvtxDomainRangePop_impl_fntype nvtxDomainRangePop_impl_fnptr; + nvtxDomainResourceCreate_impl_fntype nvtxDomainResourceCreate_impl_fnptr; + nvtxDomainResourceDestroy_impl_fntype nvtxDomainResourceDestroy_impl_fnptr; + nvtxDomainNameCategoryA_impl_fntype nvtxDomainNameCategoryA_impl_fnptr; + nvtxDomainNameCategoryW_impl_fntype nvtxDomainNameCategoryW_impl_fnptr; + nvtxDomainRegisterStringA_impl_fntype nvtxDomainRegisterStringA_impl_fnptr; + nvtxDomainRegisterStringW_impl_fntype nvtxDomainRegisterStringW_impl_fnptr; + nvtxDomainCreateA_impl_fntype nvtxDomainCreateA_impl_fnptr; + nvtxDomainCreateW_impl_fntype nvtxDomainCreateW_impl_fnptr; + nvtxDomainDestroy_impl_fntype nvtxDomainDestroy_impl_fnptr; + nvtxInitialize_impl_fntype nvtxInitialize_impl_fnptr; + + nvtxDomainSyncUserCreate_impl_fntype nvtxDomainSyncUserCreate_impl_fnptr; + nvtxDomainSyncUserDestroy_impl_fntype nvtxDomainSyncUserDestroy_impl_fnptr; + nvtxDomainSyncUserAcquireStart_impl_fntype nvtxDomainSyncUserAcquireStart_impl_fnptr; + nvtxDomainSyncUserAcquireFailed_impl_fntype nvtxDomainSyncUserAcquireFailed_impl_fnptr; + nvtxDomainSyncUserAcquireSuccess_impl_fntype nvtxDomainSyncUserAcquireSuccess_impl_fnptr; + nvtxDomainSyncUserReleasing_impl_fntype nvtxDomainSyncUserReleasing_impl_fnptr; + + /* Tables of function pointers -- Extra null added to the end to ensure + * a crash instead of silent corruption if a tool reads off the end. 
*/ + NvtxFunctionPointer* functionTable_CORE [NVTX_CBID_CORE_SIZE + 1]; + NvtxFunctionPointer* functionTable_CUDA [NVTX_CBID_CUDA_SIZE + 1]; + NvtxFunctionPointer* functionTable_OPENCL[NVTX_CBID_OPENCL_SIZE + 1]; + NvtxFunctionPointer* functionTable_CUDART[NVTX_CBID_CUDART_SIZE + 1]; + NvtxFunctionPointer* functionTable_CORE2 [NVTX_CBID_CORE2_SIZE + 1]; + NvtxFunctionPointer* functionTable_SYNC [NVTX_CBID_SYNC_SIZE + 1]; +} nvtxGlobals_t; + +NVTX_LINKONCE_DEFINE_GLOBAL nvtxGlobals_t NVTX_VERSIONED_IDENTIFIER(nvtxGlobals) = +{ + NVTX_INIT_STATE_FRESH, + + { + sizeof(NvtxExportTableCallbacks), + NVTX_VERSIONED_IDENTIFIER(nvtxEtiGetModuleFunctionTable) + }, + { + sizeof(NvtxExportTableVersionInfo), + NVTX_VERSION, + 0, + NVTX_VERSIONED_IDENTIFIER(nvtxEtiSetInjectionNvtxVersion) + }, + + /* Implementation function pointers */ + NVTX_VERSIONED_IDENTIFIER(nvtxMarkEx_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxMarkA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxMarkW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartEx_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxRangeEnd_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxRangePushEx_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxRangePushA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxRangePushW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxRangePop_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadW_impl_init), + + NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamA_impl_init), + 
NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventW_impl_init), + + NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventW_impl_init), + + NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventW_impl_init), + + NVTX_VERSIONED_IDENTIFIER(nvtxDomainMarkEx_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeStartEx_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeEnd_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePushEx_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePop_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceCreate_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceDestroy_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringA_impl_init), + 
NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateA_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateW_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainDestroy_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxInitialize_impl_init), + + NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserCreate_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserDestroy_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireStart_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireFailed_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireSuccess_impl_init), + NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserReleasing_impl_init), + + /* Tables of function pointers */ + { + 0, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkEx_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartEx_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeEnd_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushEx_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePop_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryW_impl_fnptr, + 
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadW_impl_fnptr, + 0 + }, + { + 0, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventW_impl_fnptr, + 0 + }, + { + 0, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerW_impl_fnptr, + 
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventW_impl_fnptr, + 0 + }, + { + 0, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventW_impl_fnptr, + 0 + }, + { + 0, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainMarkEx_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeStartEx_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeEnd_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePushEx_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePop_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceCreate_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceDestroy_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringA_impl_fnptr, + 
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateA_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateW_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainDestroy_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxInitialize_impl_fnptr, + 0 + }, + { + 0, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserCreate_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserDestroy_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireStart_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireFailed_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireSuccess_impl_fnptr, + (NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserReleasing_impl_fnptr, + 0 + } +}; + +/* ---- Define static inline implementations of core API functions ---- */ + +#include "nvtxImplCore.h" + +/* ---- Define implementations of export table functions ---- */ + +NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxEtiGetModuleFunctionTable)( + NvtxCallbackModule module, + NvtxFunctionTable* out_table, + unsigned int* out_size) +{ + unsigned int bytes = 0; + NvtxFunctionTable table = (NvtxFunctionTable)0; + + switch (module) + { + case NVTX_CB_MODULE_CORE: + table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CORE; + bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CORE); + break; + case NVTX_CB_MODULE_CUDA: + table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CUDA; + bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CUDA); + break; + 
case NVTX_CB_MODULE_OPENCL: + table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_OPENCL; + bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_OPENCL); + break; + case NVTX_CB_MODULE_CUDART: + table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CUDART; + bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CUDART); + break; + case NVTX_CB_MODULE_CORE2: + table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CORE2; + bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CORE2); + break; + case NVTX_CB_MODULE_SYNC: + table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_SYNC; + bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_SYNC); + break; + default: return 0; + } + + if (out_size) + *out_size = (bytes / (unsigned int)sizeof(NvtxFunctionPointer*)) - 1; + + if (out_table) + *out_table = table; + + return 1; +} + +NVTX_LINKONCE_DEFINE_FUNCTION const void* NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxGetExportTable)(uint32_t exportTableId) +{ + switch (exportTableId) + { + case NVTX_ETID_CALLBACKS: return &NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).etblCallbacks; + case NVTX_ETID_VERSIONINFO: return &NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).etblVersionInfo; + default: return 0; + } +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxEtiSetInjectionNvtxVersion)(uint32_t version) +{ + /* Reserved for custom implementations to resolve problems with tools */ + (void)version; +} + +/* ---- Define implementations of init versions of all API functions ---- */ + +#include "nvtxInitDefs.h" + +/* ---- Define implementations of initialization functions ---- */ + +#include "nvtxInit.h" + +#ifdef __GNUC__ +#pragma GCC visibility pop +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCore.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCore.h new file mode 100644 index 0000000000000000000000000000000000000000..aee1014ecd53f8b980442109e51fdbc7672ff6d0 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCore.h @@ -0,0 +1,299 @@ +NVTX_DECLSPEC void NVTX_API nvtxMarkEx(const nvtxEventAttributes_t* eventAttrib) +{ +#ifndef NVTX_DISABLE + nvtxMarkEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkEx_impl_fnptr; + if(local!=0) + (*local)(eventAttrib); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxMarkA(const char* message) +{ +#ifndef NVTX_DISABLE + nvtxMarkA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkA_impl_fnptr; + if(local!=0) + (*local)(message); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxMarkW(const wchar_t* message) +{ +#ifndef NVTX_DISABLE + nvtxMarkW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkW_impl_fnptr; + if(local!=0) + (*local)(message); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartEx(const nvtxEventAttributes_t* eventAttrib) +{ +#ifndef NVTX_DISABLE + nvtxRangeStartEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartEx_impl_fnptr; + if(local!=0) + return (*local)(eventAttrib); + else +#endif /*NVTX_DISABLE*/ + return (nvtxRangeId_t)0; +} + +NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartA(const char* message) +{ +#ifndef NVTX_DISABLE + nvtxRangeStartA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartA_impl_fnptr; + if(local!=0) + return (*local)(message); + else +#endif /*NVTX_DISABLE*/ + return (nvtxRangeId_t)0; +} + +NVTX_DECLSPEC 
nvtxRangeId_t NVTX_API nvtxRangeStartW(const wchar_t* message) +{ +#ifndef NVTX_DISABLE + nvtxRangeStartW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartW_impl_fnptr; + if(local!=0) + return (*local)(message); + else +#endif /*NVTX_DISABLE*/ + return (nvtxRangeId_t)0; +} + +NVTX_DECLSPEC void NVTX_API nvtxRangeEnd(nvtxRangeId_t id) +{ +#ifndef NVTX_DISABLE + nvtxRangeEnd_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeEnd_impl_fnptr; + if(local!=0) + (*local)(id); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC int NVTX_API nvtxRangePushEx(const nvtxEventAttributes_t* eventAttrib) +{ +#ifndef NVTX_DISABLE + nvtxRangePushEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushEx_impl_fnptr; + if(local!=0) + return (*local)(eventAttrib); + else +#endif /*NVTX_DISABLE*/ + return (int)NVTX_NO_PUSH_POP_TRACKING; +} + +NVTX_DECLSPEC int NVTX_API nvtxRangePushA(const char* message) +{ +#ifndef NVTX_DISABLE + nvtxRangePushA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushA_impl_fnptr; + if(local!=0) + return (*local)(message); + else +#endif /*NVTX_DISABLE*/ + return (int)NVTX_NO_PUSH_POP_TRACKING; +} + +NVTX_DECLSPEC int NVTX_API nvtxRangePushW(const wchar_t* message) +{ +#ifndef NVTX_DISABLE + nvtxRangePushW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushW_impl_fnptr; + if(local!=0) + return (*local)(message); + else +#endif /*NVTX_DISABLE*/ + return (int)NVTX_NO_PUSH_POP_TRACKING; +} + +NVTX_DECLSPEC int NVTX_API nvtxRangePop(void) +{ +#ifndef NVTX_DISABLE + nvtxRangePop_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePop_impl_fnptr; + if(local!=0) + return (*local)(); + else +#endif /*NVTX_DISABLE*/ + return (int)NVTX_NO_PUSH_POP_TRACKING; +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCategoryA(uint32_t category, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCategoryA_impl_fntype local = 
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryA_impl_fnptr; + if(local!=0) + (*local)(category, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCategoryW(uint32_t category, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCategoryW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryW_impl_fnptr; + if(local!=0) + (*local)(category, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadA(uint32_t threadId, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameOsThreadA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadA_impl_fnptr; + if(local!=0) + (*local)(threadId, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadW(uint32_t threadId, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameOsThreadW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadW_impl_fnptr; + if(local!=0) + (*local)(threadId, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxDomainMarkEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib) +{ +#ifndef NVTX_DISABLE + nvtxDomainMarkEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainMarkEx_impl_fnptr; + if(local!=0) + (*local)(domain, eventAttrib); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxDomainRangeStartEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib) +{ +#ifndef NVTX_DISABLE + nvtxDomainRangeStartEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeStartEx_impl_fnptr; + if(local!=0) + return (*local)(domain, eventAttrib); + else +#endif /*NVTX_DISABLE*/ + return (nvtxRangeId_t)0; +} + +NVTX_DECLSPEC void NVTX_API nvtxDomainRangeEnd(nvtxDomainHandle_t domain, nvtxRangeId_t id) +{ +#ifndef NVTX_DISABLE + nvtxDomainRangeEnd_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeEnd_impl_fnptr; + if(local!=0) + 
(*local)(domain, id); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC int NVTX_API nvtxDomainRangePushEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib) +{ +#ifndef NVTX_DISABLE + nvtxDomainRangePushEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePushEx_impl_fnptr; + if(local!=0) + return (*local)(domain, eventAttrib); + else +#endif /*NVTX_DISABLE*/ + return (int)NVTX_NO_PUSH_POP_TRACKING; +} + +NVTX_DECLSPEC int NVTX_API nvtxDomainRangePop(nvtxDomainHandle_t domain) +{ +#ifndef NVTX_DISABLE + nvtxDomainRangePop_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePop_impl_fnptr; + if(local!=0) + return (*local)(domain); + else +#endif /*NVTX_DISABLE*/ + return (int)NVTX_NO_PUSH_POP_TRACKING; +} + +NVTX_DECLSPEC nvtxResourceHandle_t NVTX_API nvtxDomainResourceCreate(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs) +{ +#ifndef NVTX_DISABLE + nvtxDomainResourceCreate_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceCreate_impl_fnptr; + if(local!=0) + return (*local)(domain, attribs); + else +#endif /*NVTX_DISABLE*/ + return (nvtxResourceHandle_t)0; +} + +NVTX_DECLSPEC void NVTX_API nvtxDomainResourceDestroy(nvtxResourceHandle_t resource) +{ +#ifndef NVTX_DISABLE + nvtxDomainResourceDestroy_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceDestroy_impl_fnptr; + if(local!=0) + (*local)(resource); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryA(nvtxDomainHandle_t domain, uint32_t category, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxDomainNameCategoryA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryA_impl_fnptr; + if(local!=0) + (*local)(domain, category, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryW(nvtxDomainHandle_t domain, uint32_t category, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + 
nvtxDomainNameCategoryW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryW_impl_fnptr; + if(local!=0) + (*local)(domain, category, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringA(nvtxDomainHandle_t domain, const char* string) +{ +#ifndef NVTX_DISABLE + nvtxDomainRegisterStringA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringA_impl_fnptr; + if(local!=0) + return (*local)(domain, string); + else +#endif /*NVTX_DISABLE*/ + return (nvtxStringHandle_t)0; +} + +NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringW(nvtxDomainHandle_t domain, const wchar_t* string) +{ +#ifndef NVTX_DISABLE + nvtxDomainRegisterStringW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringW_impl_fnptr; + if(local!=0) + return (*local)(domain, string); + else +#endif /*NVTX_DISABLE*/ + return (nvtxStringHandle_t)0; +} + +NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateA(const char* message) +{ +#ifndef NVTX_DISABLE + nvtxDomainCreateA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateA_impl_fnptr; + if(local!=0) + return (*local)(message); + else +#endif /*NVTX_DISABLE*/ + return (nvtxDomainHandle_t)0; +} + +NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateW(const wchar_t* message) +{ +#ifndef NVTX_DISABLE + nvtxDomainCreateW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateW_impl_fnptr; + if(local!=0) + return (*local)(message); + else +#endif /*NVTX_DISABLE*/ + return (nvtxDomainHandle_t)0; +} + +NVTX_DECLSPEC void NVTX_API nvtxDomainDestroy(nvtxDomainHandle_t domain) +{ +#ifndef NVTX_DISABLE + nvtxDomainDestroy_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainDestroy_impl_fnptr; + if(local!=0) + (*local)(domain); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxInitialize(const void* reserved) +{ +#ifndef NVTX_DISABLE + 
nvtxInitialize_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxInitialize_impl_fnptr; + if(local!=0) + (*local)(reserved); +#endif /*NVTX_DISABLE*/ +} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCudaRt_v3.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCudaRt_v3.h new file mode 100644 index 0000000000000000000000000000000000000000..24be8bd4722506d4ca318b07519884fc407847cd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCudaRt_v3.h @@ -0,0 +1,112 @@ +/* This file was procedurally generated! Do not modify this file by hand. */ + +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. 
This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. +*/ + +#ifndef NVTX_IMPL_GUARD_CUDART +#error Never include this file directly -- it is automatically included by nvToolsExtCudaRt.h (except when NVTX_NO_IMPL is defined). +#endif + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +//typedef void (NVTX_API * nvtxNameCudaDeviceA_impl_fntype)(int device, const char* name); +//typedef void (NVTX_API * nvtxNameCudaDeviceW_impl_fntype)(int device, const wchar_t* name); +typedef void (NVTX_API * nvtxNameCudaStreamA_impl_fntype)(cudaStream_t stream, const char* name); +typedef void (NVTX_API * nvtxNameCudaStreamW_impl_fntype)(cudaStream_t stream, const wchar_t* name); +typedef void (NVTX_API * nvtxNameCudaEventA_impl_fntype)(cudaEvent_t event, const char* name); +typedef void (NVTX_API * nvtxNameCudaEventW_impl_fntype)(cudaEvent_t event, const wchar_t* name); + +NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceA(int device, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCudaDeviceA_impl_fntype local = (nvtxNameCudaDeviceA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceA_impl_fnptr; + if(local!=0) + (*local)(device, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceW(int device, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + 
nvtxNameCudaDeviceW_impl_fntype local = (nvtxNameCudaDeviceW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceW_impl_fnptr; + if(local!=0) + (*local)(device, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamA(cudaStream_t stream, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCudaStreamA_impl_fntype local = (nvtxNameCudaStreamA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamA_impl_fnptr; + if(local!=0) + (*local)(stream, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamW(cudaStream_t stream, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCudaStreamW_impl_fntype local = (nvtxNameCudaStreamW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamW_impl_fnptr; + if(local!=0) + (*local)(stream, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventA(cudaEvent_t event, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCudaEventA_impl_fntype local = (nvtxNameCudaEventA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventA_impl_fnptr; + if(local!=0) + (*local)(event, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventW(cudaEvent_t event, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCudaEventW_impl_fntype local = (nvtxNameCudaEventW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventW_impl_fnptr; + if(local!=0) + (*local)(event, name); +#endif /*NVTX_DISABLE*/ +} + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCuda_v3.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCuda_v3.h new file mode 100644 index 0000000000000000000000000000000000000000..ee30111bd123b2c1706f23e73e3803b8c1a02927 
--- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCuda_v3.h @@ -0,0 +1,133 @@ +/* This file was procedurally generated! Do not modify this file by hand. */ + +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. 
+* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. +*/ + +#ifndef NVTX_IMPL_GUARD_CUDA +#error Never include this file directly -- it is automatically included by nvToolsExtCuda.h (except when NVTX_NO_IMPL is defined). +#endif + + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +typedef void (NVTX_API * nvtxNameCuDeviceA_impl_fntype)(CUdevice device, const char* name); +typedef void (NVTX_API * nvtxNameCuDeviceW_impl_fntype)(CUdevice device, const wchar_t* name); +typedef void (NVTX_API * nvtxNameCuContextA_impl_fntype)(CUcontext context, const char* name); +typedef void (NVTX_API * nvtxNameCuContextW_impl_fntype)(CUcontext context, const wchar_t* name); +typedef void (NVTX_API * nvtxNameCuStreamA_impl_fntype)(CUstream stream, const char* name); +typedef void (NVTX_API * nvtxNameCuStreamW_impl_fntype)(CUstream stream, const wchar_t* name); +typedef void (NVTX_API * nvtxNameCuEventA_impl_fntype)(CUevent event, const char* name); +typedef void (NVTX_API * nvtxNameCuEventW_impl_fntype)(CUevent event, const wchar_t* name); + +NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceA(CUdevice device, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCuDeviceA_impl_fntype local = (nvtxNameCuDeviceA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceA_impl_fnptr; + if(local!=0) + (*local)(device, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceW(CUdevice device, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCuDeviceW_impl_fntype local = (nvtxNameCuDeviceW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceW_impl_fnptr; + if(local!=0) + (*local)(device, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCuContextA(CUcontext context, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCuContextA_impl_fntype local 
= (nvtxNameCuContextA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextA_impl_fnptr; + if(local!=0) + (*local)(context, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCuContextW(CUcontext context, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCuContextW_impl_fntype local = (nvtxNameCuContextW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextW_impl_fnptr; + if(local!=0) + (*local)(context, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamA(CUstream stream, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCuStreamA_impl_fntype local = (nvtxNameCuStreamA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamA_impl_fnptr; + if(local!=0) + (*local)(stream, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamW(CUstream stream, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCuStreamW_impl_fntype local = (nvtxNameCuStreamW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamW_impl_fnptr; + if(local!=0) + (*local)(stream, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCuEventA(CUevent event, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCuEventA_impl_fntype local = (nvtxNameCuEventA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventA_impl_fnptr; + if(local!=0) + (*local)(event, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameCuEventW(CUevent event, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameCuEventW_impl_fntype local = (nvtxNameCuEventW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventW_impl_fnptr; + if(local!=0) + (*local)(event, name); +#endif /*NVTX_DISABLE*/ +} + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplOpenCL_v3.h 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplOpenCL_v3.h new file mode 100644 index 0000000000000000000000000000000000000000..0e73224cce26b49c8f7585be3a41dd0f428fe07e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplOpenCL_v3.h @@ -0,0 +1,192 @@ +/* This file was procedurally generated! Do not modify this file by hand. */ + +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. 
+* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. +*/ + +#ifndef NVTX_IMPL_GUARD_OPENCL +#error Never include this file directly -- it is automatically included by nvToolsExtCuda.h (except when NVTX_NO_IMPL is defined). +#endif + + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +typedef void (NVTX_API * nvtxNameClDeviceA_impl_fntype)(cl_device_id device, const char* name); +typedef void (NVTX_API * nvtxNameClDeviceW_impl_fntype)(cl_device_id device, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClContextA_impl_fntype)(cl_context context, const char* name); +typedef void (NVTX_API * nvtxNameClContextW_impl_fntype)(cl_context context, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClCommandQueueA_impl_fntype)(cl_command_queue command_queue, const char* name); +typedef void (NVTX_API * nvtxNameClCommandQueueW_impl_fntype)(cl_command_queue command_queue, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClMemObjectA_impl_fntype)(cl_mem memobj, const char* name); +typedef void (NVTX_API * nvtxNameClMemObjectW_impl_fntype)(cl_mem memobj, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClSamplerA_impl_fntype)(cl_sampler sampler, const char* name); +typedef void (NVTX_API * nvtxNameClSamplerW_impl_fntype)(cl_sampler sampler, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClProgramA_impl_fntype)(cl_program program, const char* name); +typedef void (NVTX_API * nvtxNameClProgramW_impl_fntype)(cl_program program, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClEventA_impl_fntype)(cl_event evnt, const char* name); +typedef void (NVTX_API * 
nvtxNameClEventW_impl_fntype)(cl_event evnt, const wchar_t* name); + +NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceA(cl_device_id device, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClDeviceA_impl_fntype local = (nvtxNameClDeviceA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceA_impl_fnptr; + if(local!=0) + (*local)(device, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceW(cl_device_id device, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClDeviceW_impl_fntype local = (nvtxNameClDeviceW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceW_impl_fnptr; + if(local!=0) + (*local)(device, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClContextA(cl_context context, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClContextA_impl_fntype local = (nvtxNameClContextA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextA_impl_fnptr; + if(local!=0) + (*local)(context, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClContextW(cl_context context, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClContextW_impl_fntype local = (nvtxNameClContextW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextW_impl_fnptr; + if(local!=0) + (*local)(context, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueA(cl_command_queue command_queue, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClCommandQueueA_impl_fntype local = (nvtxNameClCommandQueueA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueA_impl_fnptr; + if(local!=0) + (*local)(command_queue, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueW(cl_command_queue command_queue, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClCommandQueueW_impl_fntype local = 
(nvtxNameClCommandQueueW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueW_impl_fnptr; + if(local!=0) + (*local)(command_queue, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectA(cl_mem memobj, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClMemObjectA_impl_fntype local = (nvtxNameClMemObjectA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectA_impl_fnptr; + if(local!=0) + (*local)(memobj, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectW(cl_mem memobj, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClMemObjectW_impl_fntype local = (nvtxNameClMemObjectW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectW_impl_fnptr; + if(local!=0) + (*local)(memobj, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerA(cl_sampler sampler, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClSamplerA_impl_fntype local = (nvtxNameClSamplerA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerA_impl_fnptr; + if(local!=0) + (*local)(sampler, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerW(cl_sampler sampler, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClSamplerW_impl_fntype local = (nvtxNameClSamplerW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerW_impl_fnptr; + if(local!=0) + (*local)(sampler, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClProgramA(cl_program program, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClProgramA_impl_fntype local = (nvtxNameClProgramA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramA_impl_fnptr; + if(local!=0) + (*local)(program, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClProgramW(cl_program program, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClProgramW_impl_fntype local = 
(nvtxNameClProgramW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramW_impl_fnptr; + if(local!=0) + (*local)(program, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClEventA(cl_event evnt, const char* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClEventA_impl_fntype local = (nvtxNameClEventA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventA_impl_fnptr; + if(local!=0) + (*local)(evnt, name); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxNameClEventW(cl_event evnt, const wchar_t* name) +{ +#ifndef NVTX_DISABLE + nvtxNameClEventW_impl_fntype local = (nvtxNameClEventW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventW_impl_fnptr; + if(local!=0) + (*local)(evnt, name); +#endif /*NVTX_DISABLE*/ +} + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplSync_v3.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplSync_v3.h new file mode 100644 index 0000000000000000000000000000000000000000..accc621a3d5071438f6ed7b9e2192ea8ad56977c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplSync_v3.h @@ -0,0 +1,114 @@ +/* This file was procedurally generated! Do not modify this file by hand. */ + +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. 
+* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. +*/ + +#ifndef NVTX_IMPL_GUARD_SYNC +#error Never include this file directly -- it is automatically included by nvToolsExtCuda.h (except when NVTX_NO_IMPL is defined). 
+#endif + + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +typedef nvtxSyncUser_t (NVTX_API * nvtxDomainSyncUserCreate_impl_fntype)(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs); +typedef void (NVTX_API * nvtxDomainSyncUserDestroy_impl_fntype)(nvtxSyncUser_t handle); +typedef void (NVTX_API * nvtxDomainSyncUserAcquireStart_impl_fntype)(nvtxSyncUser_t handle); +typedef void (NVTX_API * nvtxDomainSyncUserAcquireFailed_impl_fntype)(nvtxSyncUser_t handle); +typedef void (NVTX_API * nvtxDomainSyncUserAcquireSuccess_impl_fntype)(nvtxSyncUser_t handle); +typedef void (NVTX_API * nvtxDomainSyncUserReleasing_impl_fntype)(nvtxSyncUser_t handle); + +NVTX_DECLSPEC nvtxSyncUser_t NVTX_API nvtxDomainSyncUserCreate(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs) +{ +#ifndef NVTX_DISABLE + nvtxDomainSyncUserCreate_impl_fntype local = (nvtxDomainSyncUserCreate_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserCreate_impl_fnptr; + if(local!=0) + return (*local)(domain, attribs); + else +#endif /*NVTX_DISABLE*/ + return (nvtxSyncUser_t)0; +} + +NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserDestroy(nvtxSyncUser_t handle) +{ +#ifndef NVTX_DISABLE + nvtxDomainSyncUserDestroy_impl_fntype local = (nvtxDomainSyncUserDestroy_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserDestroy_impl_fnptr; + if(local!=0) + (*local)(handle); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireStart(nvtxSyncUser_t handle) +{ +#ifndef NVTX_DISABLE + nvtxDomainSyncUserAcquireStart_impl_fntype local = (nvtxDomainSyncUserAcquireStart_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireStart_impl_fnptr; + if(local!=0) + (*local)(handle); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireFailed(nvtxSyncUser_t handle) +{ +#ifndef NVTX_DISABLE + nvtxDomainSyncUserAcquireFailed_impl_fntype local = 
(nvtxDomainSyncUserAcquireFailed_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireFailed_impl_fnptr; + if(local!=0) + (*local)(handle); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireSuccess(nvtxSyncUser_t handle) +{ +#ifndef NVTX_DISABLE + nvtxDomainSyncUserAcquireSuccess_impl_fntype local = (nvtxDomainSyncUserAcquireSuccess_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireSuccess_impl_fnptr; + if(local!=0) + (*local)(handle); +#endif /*NVTX_DISABLE*/ +} + +NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserReleasing(nvtxSyncUser_t handle) +{ +#ifndef NVTX_DISABLE + nvtxDomainSyncUserReleasing_impl_fntype local = (nvtxDomainSyncUserReleasing_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserReleasing_impl_fnptr; + if(local!=0) + (*local)(handle); +#endif /*NVTX_DISABLE*/ +} + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInit.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInit.h new file mode 100644 index 0000000000000000000000000000000000000000..f2bbd3a120da351f1ad1d81dde6bfb46244559cc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInit.h @@ -0,0 +1,343 @@ +/* This file was procedurally generated! Do not modify this file by hand. */ + +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. 
+* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. +*/ + +#ifndef NVTX_IMPL_GUARD +#error Never include this file directly -- it is automatically included by nvToolsExt.h (except when NVTX_NO_IMPL is defined). 
+#endif + +/* ---- Platform-independent helper definitions and functions ---- */ + +/* Prefer macros over inline functions to reduce symbol resolution at link time */ + +#if defined(_WIN32) +#define NVTX_PATHCHAR wchar_t +#define NVTX_STR(x) L##x +#define NVTX_GETENV _wgetenv +#define NVTX_BUFSIZE MAX_PATH +#define NVTX_DLLHANDLE HMODULE +#define NVTX_DLLOPEN(x) LoadLibraryW(x) +#define NVTX_DLLFUNC GetProcAddress +#define NVTX_DLLCLOSE FreeLibrary +#define NVTX_YIELD() SwitchToThread() +#define NVTX_MEMBAR() MemoryBarrier() +#define NVTX_ATOMIC_WRITE_32(address, value) InterlockedExchange((volatile LONG*)address, value) +#define NVTX_ATOMIC_CAS_32(old, address, exchange, comparand) old = InterlockedCompareExchange((volatile LONG*)address, exchange, comparand) +#elif defined(__GNUC__) +#define NVTX_PATHCHAR char +#define NVTX_STR(x) x +#define NVTX_GETENV getenv +#define NVTX_BUFSIZE PATH_MAX +#define NVTX_DLLHANDLE void* +#define NVTX_DLLOPEN(x) dlopen(x, RTLD_LAZY) +#define NVTX_DLLFUNC dlsym +#define NVTX_DLLCLOSE dlclose +#define NVTX_YIELD() sched_yield() +#define NVTX_MEMBAR() __sync_synchronize() +/* Ensure full memory barrier for atomics, to match Windows functions */ +#define NVTX_ATOMIC_WRITE_32(address, value) __sync_synchronize(); __sync_lock_test_and_set(address, value) +#define NVTX_ATOMIC_CAS_32(old, address, exchange, comparand) __sync_synchronize(); old = __sync_val_compare_and_swap(address, exchange, comparand) +#else +#error The library does not support your configuration! +#endif + +/* Define this to 1 for platforms that where pre-injected libraries can be discovered. */ +#if defined(_WIN32) +/* TODO */ +#define NVTX_SUPPORT_ALREADY_INJECTED_LIBRARY 0 +#else +#define NVTX_SUPPORT_ALREADY_INJECTED_LIBRARY 0 +#endif + +/* Define this to 1 for platforms that support environment variables */ +/* TODO: Detect UWP, a.k.a. Windows Store app, and set this to 0. 
*/ +/* Try: #if defined(WINAPI_FAMILY_PARTITION) && WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) */ +#define NVTX_SUPPORT_ENV_VARS 1 + +/* Define this to 1 for platforms that support dynamic/shared libraries */ +#define NVTX_SUPPORT_DYNAMIC_INJECTION_LIBRARY 1 + +/* Injection libraries implementing InitializeInjectionNvtx2 may be statically linked, +* and this will override any dynamic injection. Useful for platforms where dynamic +* injection is not available. Since weak symbols not explicitly marked extern are +* guaranteed to be initialized to zero if no definitions are found by the linker, the +* dynamic injection process proceeds normally if pfnInitializeInjectionNvtx2 is 0. */ +#if defined(__GNUC__) && !defined(_WIN32) && !defined(__CYGWIN__) +#define NVTX_SUPPORT_STATIC_INJECTION_LIBRARY 1 +/* To statically inject an NVTX library, define InitializeInjectionNvtx2_fnptr as a normal +* symbol (not weak) pointing to the implementation of InitializeInjectionNvtx2 (which +* does not need to be named "InitializeInjectionNvtx2" as is necessary in a dynamic +* injection library. */ +__attribute__((weak)) NvtxInitializeInjectionNvtxFunc_t InitializeInjectionNvtx2_fnptr; +#else +#define NVTX_SUPPORT_STATIC_INJECTION_LIBRARY 0 +#endif + +/* This function tries to find or load an NVTX injection library and get the +* address of its InitializeInjection2 function. If such a function pointer +* is found, it is called, and passed the address of this NVTX instance's +* nvtxGetExportTable function, so the injection can attach to this instance. +* If the initialization fails for any reason, any dynamic library loaded will +* be freed, and all NVTX implementation functions will be set to no-ops. If +* initialization succeeds, NVTX functions not attached to the tool will be set +* to no-ops. This is implemented as one function instead of several small +* functions to minimize the number of weak symbols the linker must resolve. 
+* Order of search is: +* - Pre-injected library exporting InitializeInjectionNvtx2 +* - Loadable library exporting InitializeInjectionNvtx2 +* - Path specified by env var NVTX_INJECTION??_PATH (?? is 32 or 64) +* - On Android, libNvtxInjection??.so within the package (?? is 32 or 64) +* - Statically-linked injection library defining InitializeInjectionNvtx2_fnptr +*/ +NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_VERSIONED_IDENTIFIER(nvtxInitializeInjectionLibrary)(void); +NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_VERSIONED_IDENTIFIER(nvtxInitializeInjectionLibrary)(void) +{ + const char* const initFuncName = "InitializeInjectionNvtx2"; + NvtxInitializeInjectionNvtxFunc_t init_fnptr = (NvtxInitializeInjectionNvtxFunc_t)0; + NVTX_DLLHANDLE injectionLibraryHandle = (NVTX_DLLHANDLE)0; + int entryPointStatus = 0; + +#if NVTX_SUPPORT_ALREADY_INJECTED_LIBRARY + /* Use POSIX global symbol chain to query for init function from any module */ + init_fnptr = (NvtxInitializeInjectionNvtxFunc_t)NVTX_DLLFUNC(0, initFuncName); +#endif + +#if NVTX_SUPPORT_DYNAMIC_INJECTION_LIBRARY + /* Try discovering dynamic injection library to load */ + if (!init_fnptr) + { +#if NVTX_SUPPORT_ENV_VARS + /* If env var NVTX_INJECTION64_PATH is set, it should contain the path + * to a 64-bit dynamic NVTX injection library (and similar for 32-bit). */ + const NVTX_PATHCHAR* const nvtxEnvVarName = (sizeof(void*) == 4) + ? NVTX_STR("NVTX_INJECTION32_PATH") + : NVTX_STR("NVTX_INJECTION64_PATH"); +#endif /* NVTX_SUPPORT_ENV_VARS */ + NVTX_PATHCHAR injectionLibraryPathBuf[NVTX_BUFSIZE]; + const NVTX_PATHCHAR* injectionLibraryPath = (const NVTX_PATHCHAR*)0; + + /* Refer to this variable explicitly in case all references to it are #if'ed out */ + (void)injectionLibraryPathBuf; + +#if NVTX_SUPPORT_ENV_VARS + /* Disable the warning for getenv & _wgetenv -- this usage is safe because + * these functions are not called again before using the returned value. 
*/ +#if defined(_MSC_VER) +#pragma warning( push ) +#pragma warning( disable : 4996 ) +#endif + injectionLibraryPath = NVTX_GETENV(nvtxEnvVarName); +#if defined(_MSC_VER) +#pragma warning( pop ) +#endif +#endif + +#if defined(__ANDROID__) + if (!injectionLibraryPath) + { + const char *bits = (sizeof(void*) == 4) ? "32" : "64"; + char cmdlineBuf[32]; + char pkgName[PATH_MAX]; + int count; + int pid; + FILE *fp; + size_t bytesRead; + size_t pos; + + pid = (int)getpid(); + count = snprintf(cmdlineBuf, sizeof(cmdlineBuf), "/proc/%d/cmdline", pid); + if (count <= 0 || count >= (int)sizeof(cmdlineBuf)) + { + NVTX_ERR("Path buffer too small for: /proc/%d/cmdline\n", pid); + return NVTX_ERR_INIT_ACCESS_LIBRARY; + } + + fp = fopen(cmdlineBuf, "r"); + if (!fp) + { + NVTX_ERR("File couldn't be opened: %s\n", cmdlineBuf); + return NVTX_ERR_INIT_ACCESS_LIBRARY; + } + + bytesRead = fread(pkgName, 1, sizeof(pkgName) - 1, fp); + fclose(fp); + if (bytesRead == 0) + { + NVTX_ERR("Package name couldn't be read from file: %s\n", cmdlineBuf); + return NVTX_ERR_INIT_ACCESS_LIBRARY; + } + + pkgName[bytesRead] = 0; + + /* String can contain colon as a process separator. In this case the package name is before the colon. */ + pos = 0; + while (pos < bytesRead && pkgName[pos] != ':' && pkgName[pos] != '\0') + { + ++pos; + } + pkgName[pos] = 0; + + count = snprintf(injectionLibraryPathBuf, NVTX_BUFSIZE, "/data/data/%s/files/libNvtxInjection%s.so", pkgName, bits); + if (count <= 0 || count >= NVTX_BUFSIZE) + { + NVTX_ERR("Path buffer too small for: /data/data/%s/files/libNvtxInjection%s.so\n", pkgName, bits); + return NVTX_ERR_INIT_ACCESS_LIBRARY; + } + + /* On Android, verify path is accessible due to aggressive file access restrictions. */ + /* For dlopen, if the filename contains a leading slash, then it is interpreted as a */ + /* relative or absolute pathname; otherwise it will follow the rules in ld.so. 
*/ + if (injectionLibraryPathBuf[0] == '/') + { +#if (__ANDROID_API__ < 21) + int access_err = access(injectionLibraryPathBuf, F_OK | R_OK); +#else + int access_err = faccessat(AT_FDCWD, injectionLibraryPathBuf, F_OK | R_OK, 0); +#endif + if (access_err != 0) + { + NVTX_ERR("Injection library path wasn't accessible [code=%s] [path=%s]\n", strerror(errno), injectionLibraryPathBuf); + return NVTX_ERR_INIT_ACCESS_LIBRARY; + } + } + injectionLibraryPath = injectionLibraryPathBuf; + } +#endif + + /* At this point, injectionLibraryPath is specified if a dynamic + * injection library was specified by a tool. */ + if (injectionLibraryPath) + { + /* Load the injection library */ + injectionLibraryHandle = NVTX_DLLOPEN(injectionLibraryPath); + if (!injectionLibraryHandle) + { + NVTX_ERR("Failed to load injection library\n"); + return NVTX_ERR_INIT_LOAD_LIBRARY; + } + else + { + /* Attempt to get the injection library's entry-point */ + init_fnptr = (NvtxInitializeInjectionNvtxFunc_t)NVTX_DLLFUNC(injectionLibraryHandle, initFuncName); + if (!init_fnptr) + { + NVTX_DLLCLOSE(injectionLibraryHandle); + NVTX_ERR("Failed to get address of function InitializeInjectionNvtx2 from injection library\n"); + return NVTX_ERR_INIT_MISSING_LIBRARY_ENTRY_POINT; + } + } + } + } +#endif + +#if NVTX_SUPPORT_STATIC_INJECTION_LIBRARY + if (!init_fnptr) + { + /* Check weakly-defined function pointer. A statically-linked injection can define this as + * a normal symbol and it will take precedence over a dynamic injection. */ + if (InitializeInjectionNvtx2_fnptr) + { + init_fnptr = InitializeInjectionNvtx2_fnptr; + } + } +#endif + + /* At this point, if init_fnptr is not set, then no tool has specified + * an NVTX injection library -- return non-success result so all NVTX + * API functions will be set to no-ops. */ + if (!init_fnptr) + { + return NVTX_ERR_NO_INJECTION_LIBRARY_AVAILABLE; + } + + /* Invoke injection library's initialization function. 
If it returns + * 0 (failure) and a dynamic injection was loaded, unload it. */ + entryPointStatus = init_fnptr(NVTX_VERSIONED_IDENTIFIER(nvtxGetExportTable)); + if (entryPointStatus == 0) + { + NVTX_ERR("Failed to initialize injection library -- initialization function returned 0\n"); + if (injectionLibraryHandle) + { + NVTX_DLLCLOSE(injectionLibraryHandle); + } + return NVTX_ERR_INIT_FAILED_LIBRARY_ENTRY_POINT; + } + + return NVTX_SUCCESS; +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(void) +{ + unsigned int old; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).initState == NVTX_INIT_STATE_COMPLETE) + { + return; + } + + NVTX_ATOMIC_CAS_32( + old, + &NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).initState, + NVTX_INIT_STATE_STARTED, + NVTX_INIT_STATE_FRESH); + if (old == NVTX_INIT_STATE_FRESH) + { + int result; + int forceAllToNoops; + + /* Load & initialize injection library -- it will assign the function pointers */ + result = NVTX_VERSIONED_IDENTIFIER(nvtxInitializeInjectionLibrary)(); + + /* Set all pointers not assigned by the injection to null */ + forceAllToNoops = result != NVTX_SUCCESS; /* Set all to null if injection init failed */ + NVTX_VERSIONED_IDENTIFIER(nvtxSetInitFunctionsToNoops)(forceAllToNoops); + + /* Signal that initialization has finished, so now the assigned function pointers will be used */ + NVTX_ATOMIC_WRITE_32( + &NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).initState, + NVTX_INIT_STATE_COMPLETE); + } + else /* Spin-wait until initialization has finished */ + { + NVTX_MEMBAR(); + while (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).initState != NVTX_INIT_STATE_COMPLETE) + { + NVTX_YIELD(); + NVTX_MEMBAR(); + } + } +} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInitDecls.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInitDecls.h new file mode 100644 
index 0000000000000000000000000000000000000000..757a7296093750e75721fb93855d0aa37da64103 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInitDecls.h @@ -0,0 +1,73 @@ +#ifndef NVTX_IMPL_GUARD +#error Never include this file directly -- it is automatically included by nvToolsExt.h (except when NVTX_NO_IMPL is defined). +#endif + +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkEx_impl_init)(const nvtxEventAttributes_t* eventAttrib); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkA_impl_init)(const char* message); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkW_impl_init)(const wchar_t* message); +NVTX_LINKONCE_FWDDECL_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartEx_impl_init)(const nvtxEventAttributes_t* eventAttrib); +NVTX_LINKONCE_FWDDECL_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartA_impl_init)(const char* message); +NVTX_LINKONCE_FWDDECL_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartW_impl_init)(const wchar_t* message); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeEnd_impl_init)(nvtxRangeId_t id); +NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushEx_impl_init)(const nvtxEventAttributes_t* eventAttrib); +NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushA_impl_init)(const char* message); +NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushW_impl_init)(const wchar_t* message); +NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePop_impl_init)(void); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryA_impl_init)(uint32_t category, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API 
NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryW_impl_init)(uint32_t category, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadA_impl_init)(uint32_t threadId, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadW_impl_init)(uint32_t threadId, const wchar_t* name); + +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceA_impl_init)(nvtx_CUdevice device, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceW_impl_init)(nvtx_CUdevice device, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextA_impl_init)(nvtx_CUcontext context, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextW_impl_init)(nvtx_CUcontext context, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamA_impl_init)(nvtx_CUstream stream, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamW_impl_init)(nvtx_CUstream stream, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventA_impl_init)(nvtx_CUevent event, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventW_impl_init)(nvtx_CUevent event, const wchar_t* name); + +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceA_impl_init)(nvtx_cl_device_id device, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceW_impl_init)(nvtx_cl_device_id device, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextA_impl_init)(nvtx_cl_context context, const char* name); 
+NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextW_impl_init)(nvtx_cl_context context, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueA_impl_init)(nvtx_cl_command_queue command_queue, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueW_impl_init)(nvtx_cl_command_queue command_queue, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectA_impl_init)(nvtx_cl_mem memobj, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectW_impl_init)(nvtx_cl_mem memobj, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerA_impl_init)(nvtx_cl_sampler sampler, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerW_impl_init)(nvtx_cl_sampler sampler, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramA_impl_init)(nvtx_cl_program program, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramW_impl_init)(nvtx_cl_program program, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventA_impl_init)(nvtx_cl_event evnt, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventW_impl_init)(nvtx_cl_event evnt, const wchar_t* name); + +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceA_impl_init)(int device, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceW_impl_init)(int device, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API 
NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamA_impl_init)(nvtx_cudaStream_t stream, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamW_impl_init)(nvtx_cudaStream_t stream, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventA_impl_init)(nvtx_cudaEvent_t event, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventW_impl_init)(nvtx_cudaEvent_t event, const wchar_t* name); + +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainMarkEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib); +NVTX_LINKONCE_FWDDECL_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeStartEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeEnd_impl_init)(nvtxDomainHandle_t domain, nvtxRangeId_t id); +NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePushEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib); +NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePop_impl_init)(nvtxDomainHandle_t domain); +NVTX_LINKONCE_FWDDECL_FUNCTION nvtxResourceHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceCreate_impl_init)(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceDestroy_impl_init)(nvtxResourceHandle_t resource); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryA_impl_init)(nvtxDomainHandle_t domain, uint32_t category, const char* name); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryW_impl_init)(nvtxDomainHandle_t domain, uint32_t 
category, const wchar_t* name); +NVTX_LINKONCE_FWDDECL_FUNCTION nvtxStringHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringA_impl_init)(nvtxDomainHandle_t domain, const char* string); +NVTX_LINKONCE_FWDDECL_FUNCTION nvtxStringHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringW_impl_init)(nvtxDomainHandle_t domain, const wchar_t* string); +NVTX_LINKONCE_FWDDECL_FUNCTION nvtxDomainHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateA_impl_init)(const char* message); +NVTX_LINKONCE_FWDDECL_FUNCTION nvtxDomainHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateW_impl_init)(const wchar_t* message); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainDestroy_impl_init)(nvtxDomainHandle_t domain); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxInitialize_impl_init)(const void* reserved); + +NVTX_LINKONCE_FWDDECL_FUNCTION nvtxSyncUser_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserCreate_impl_init)(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserDestroy_impl_init)(nvtxSyncUser_t handle); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireStart_impl_init)(nvtxSyncUser_t handle); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireFailed_impl_init)(nvtxSyncUser_t handle); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireSuccess_impl_init)(nvtxSyncUser_t handle); +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserReleasing_impl_init)(nvtxSyncUser_t handle); diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInitDefs.h 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInitDefs.h new file mode 100644 index 0000000000000000000000000000000000000000..93d5f0e975059b00f8fb32d95ab06483cc53d80e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInitDefs.h @@ -0,0 +1,565 @@ +#ifndef NVTX_IMPL_GUARD +#error Never include this file directly -- it is automatically included by nvToolsExt.h (except when NVTX_NO_IMPL is defined). +#endif + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkEx_impl_init)(const nvtxEventAttributes_t* eventAttrib){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxMarkEx(eventAttrib); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkA_impl_init)(const char* message){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxMarkA(message); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkW_impl_init)(const wchar_t* message){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxMarkW(message); +} + +NVTX_LINKONCE_DEFINE_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartEx_impl_init)(const nvtxEventAttributes_t* eventAttrib){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxRangeStartEx(eventAttrib); +} + +NVTX_LINKONCE_DEFINE_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartA_impl_init)(const char* message){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxRangeStartA(message); +} + +NVTX_LINKONCE_DEFINE_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartW_impl_init)(const wchar_t* message){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxRangeStartW(message); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeEnd_impl_init)(nvtxRangeId_t id){ + 
NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxRangeEnd(id); +} + +NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushEx_impl_init)(const nvtxEventAttributes_t* eventAttrib){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxRangePushEx(eventAttrib); +} + +NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushA_impl_init)(const char* message){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxRangePushA(message); +} + +NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushW_impl_init)(const wchar_t* message){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxRangePushW(message); +} + +NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePop_impl_init)(void){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxRangePop(); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryA_impl_init)(uint32_t category, const char* name){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxNameCategoryA(category, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryW_impl_init)(uint32_t category, const wchar_t* name){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxNameCategoryW(category, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadA_impl_init)(uint32_t threadId, const char* name){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxNameOsThreadA(threadId, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadW_impl_init)(uint32_t threadId, const wchar_t* name){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxNameOsThreadW(threadId, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainMarkEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); 
+ nvtxDomainMarkEx(domain, eventAttrib); +} + +NVTX_LINKONCE_DEFINE_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeStartEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxDomainRangeStartEx(domain, eventAttrib); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeEnd_impl_init)(nvtxDomainHandle_t domain, nvtxRangeId_t id){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxDomainRangeEnd(domain, id); +} + +NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePushEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxDomainRangePushEx(domain, eventAttrib); +} + +NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePop_impl_init)(nvtxDomainHandle_t domain){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxDomainRangePop(domain); +} + +NVTX_LINKONCE_DEFINE_FUNCTION nvtxResourceHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceCreate_impl_init)(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxDomainResourceCreate(domain, attribs); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceDestroy_impl_init)(nvtxResourceHandle_t resource){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxDomainResourceDestroy(resource); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryA_impl_init)(nvtxDomainHandle_t domain, uint32_t category, const char* name){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxDomainNameCategoryA(domain, category, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryW_impl_init)(nvtxDomainHandle_t domain, uint32_t category, 
const wchar_t* name){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxDomainNameCategoryW(domain, category, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION nvtxStringHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringA_impl_init)(nvtxDomainHandle_t domain, const char* string){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxDomainRegisterStringA(domain, string); +} + +NVTX_LINKONCE_DEFINE_FUNCTION nvtxStringHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringW_impl_init)(nvtxDomainHandle_t domain, const wchar_t* string){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxDomainRegisterStringW(domain, string); +} + +NVTX_LINKONCE_DEFINE_FUNCTION nvtxDomainHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateA_impl_init)(const char* message){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxDomainCreateA(message); +} + +NVTX_LINKONCE_DEFINE_FUNCTION nvtxDomainHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateW_impl_init)(const wchar_t* message){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + return nvtxDomainCreateW(message); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainDestroy_impl_init)(nvtxDomainHandle_t domain){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxDomainDestroy(domain); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxInitialize_impl_init)(const void* reserved){ + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + nvtxInitialize(reserved); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceA_impl_init)(nvtx_CUdevice device, const char* name){ + nvtxNameCuDeviceA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceA_impl_fnptr; + if (local) + local(device, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceW_impl_init)(nvtx_CUdevice device, 
const wchar_t* name){ + nvtxNameCuDeviceW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceW_impl_fnptr; + if (local) + local(device, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextA_impl_init)(nvtx_CUcontext context, const char* name){ + nvtxNameCuContextA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextA_impl_fnptr; + if (local) + local(context, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextW_impl_init)(nvtx_CUcontext context, const wchar_t* name){ + nvtxNameCuContextW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextW_impl_fnptr; + if (local) + local(context, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamA_impl_init)(nvtx_CUstream stream, const char* name){ + nvtxNameCuStreamA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamA_impl_fnptr; + if (local) + local(stream, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamW_impl_init)(nvtx_CUstream stream, const wchar_t* name){ + nvtxNameCuStreamW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamW_impl_fnptr; + if (local) + local(stream, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventA_impl_init)(nvtx_CUevent event, const char* name){ + nvtxNameCuEventA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventA_impl_fnptr; + if (local) + local(event, name); +} + 
+NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventW_impl_init)(nvtx_CUevent event, const wchar_t* name){ + nvtxNameCuEventW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventW_impl_fnptr; + if (local) + local(event, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceA_impl_init)(int device, const char* name){ + nvtxNameCudaDeviceA_impl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceA_impl_fnptr; + if (local) + local(device, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceW_impl_init)(int device, const wchar_t* name){ + nvtxNameCudaDeviceW_impl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceW_impl_fnptr; + if (local) + local(device, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamA_impl_init)(nvtx_cudaStream_t stream, const char* name){ + nvtxNameCudaStreamA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamA_impl_fnptr; + if (local) + local(stream, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamW_impl_init)(nvtx_cudaStream_t stream, const wchar_t* name){ + nvtxNameCudaStreamW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamW_impl_fnptr; + if (local) + local(stream, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventA_impl_init)(nvtx_cudaEvent_t event, const char* name){ + nvtxNameCudaEventA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = 
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventA_impl_fnptr; + if (local) + local(event, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventW_impl_init)(nvtx_cudaEvent_t event, const wchar_t* name){ + nvtxNameCudaEventW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventW_impl_fnptr; + if (local) + local(event, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceA_impl_init)(nvtx_cl_device_id device, const char* name){ + nvtxNameClDeviceA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceA_impl_fnptr; + if (local) + local(device, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceW_impl_init)(nvtx_cl_device_id device, const wchar_t* name){ + nvtxNameClDeviceW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceW_impl_fnptr; + if (local) + local(device, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextA_impl_init)(nvtx_cl_context context, const char* name){ + nvtxNameClContextA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextA_impl_fnptr; + if (local) + local(context, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextW_impl_init)(nvtx_cl_context context, const wchar_t* name){ + nvtxNameClContextW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextW_impl_fnptr; + if (local) + local(context, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API 
NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueA_impl_init)(nvtx_cl_command_queue command_queue, const char* name){ + nvtxNameClCommandQueueA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueA_impl_fnptr; + if (local) + local(command_queue, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueW_impl_init)(nvtx_cl_command_queue command_queue, const wchar_t* name){ + nvtxNameClCommandQueueW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueW_impl_fnptr; + if (local) + local(command_queue, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectA_impl_init)(nvtx_cl_mem memobj, const char* name){ + nvtxNameClMemObjectA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectA_impl_fnptr; + if (local) + local(memobj, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectW_impl_init)(nvtx_cl_mem memobj, const wchar_t* name){ + nvtxNameClMemObjectW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectW_impl_fnptr; + if (local) + local(memobj, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerA_impl_init)(nvtx_cl_sampler sampler, const char* name){ + nvtxNameClSamplerA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerA_impl_fnptr; + if (local) + local(sampler, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerW_impl_init)(nvtx_cl_sampler sampler, const wchar_t* name){ + nvtxNameClSamplerW_fakeimpl_fntype local; 
+ NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerW_impl_fnptr; + if (local) + local(sampler, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramA_impl_init)(nvtx_cl_program program, const char* name){ + nvtxNameClProgramA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramA_impl_fnptr; + if (local) + local(program, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramW_impl_init)(nvtx_cl_program program, const wchar_t* name){ + nvtxNameClProgramW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramW_impl_fnptr; + if (local) + local(program, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventA_impl_init)(nvtx_cl_event evnt, const char* name){ + nvtxNameClEventA_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventA_impl_fnptr; + if (local) + local(evnt, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventW_impl_init)(nvtx_cl_event evnt, const wchar_t* name){ + nvtxNameClEventW_fakeimpl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventW_impl_fnptr; + if (local) + local(evnt, name); +} + +NVTX_LINKONCE_DEFINE_FUNCTION nvtxSyncUser_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserCreate_impl_init)(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs){ + nvtxDomainSyncUserCreate_impl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserCreate_impl_fnptr; + if (local) { + return local(domain, attribs); + } + return 
(nvtxSyncUser_t)0; +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserDestroy_impl_init)(nvtxSyncUser_t handle){ + nvtxDomainSyncUserDestroy_impl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserDestroy_impl_fnptr; + if (local) + local(handle); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireStart_impl_init)(nvtxSyncUser_t handle){ + nvtxDomainSyncUserAcquireStart_impl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireStart_impl_fnptr; + if (local) + local(handle); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireFailed_impl_init)(nvtxSyncUser_t handle){ + nvtxDomainSyncUserAcquireFailed_impl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireFailed_impl_fnptr; + if (local) + local(handle); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireSuccess_impl_init)(nvtxSyncUser_t handle){ + nvtxDomainSyncUserAcquireSuccess_impl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireSuccess_impl_fnptr; + if (local) + local(handle); +} + +NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserReleasing_impl_init)(nvtxSyncUser_t handle){ + nvtxDomainSyncUserReleasing_impl_fntype local; + NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(); + local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserReleasing_impl_fnptr; + if (local) + local(handle); +} + +NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_VERSIONED_IDENTIFIER(nvtxSetInitFunctionsToNoops)(int forceAllToNoops); +NVTX_LINKONCE_DEFINE_FUNCTION void 
NVTX_VERSIONED_IDENTIFIER(nvtxSetInitFunctionsToNoops)(int forceAllToNoops) +{ + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxMarkEx_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkEx_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxMarkA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxMarkW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartEx_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartEx_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeEnd_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangeEnd_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeEnd_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangePushEx_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushEx_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangePushA_impl_init) || forceAllToNoops) + 
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangePushW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePop_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangePop_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePop_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadW_impl_fnptr = NULL; + + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextA_impl_fnptr == 
NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventW_impl_fnptr = NULL; + + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextA_impl_fnptr = NULL; + 
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramW_impl_fnptr == 
NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventW_impl_fnptr = NULL; + + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventW_impl_init) || forceAllToNoops) + 
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventW_impl_fnptr = NULL; + + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainMarkEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainMarkEx_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainMarkEx_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeStartEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeStartEx_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeStartEx_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeEnd_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeEnd_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeEnd_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePushEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePushEx_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePushEx_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePop_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePop_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePop_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceCreate_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceCreate_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceCreate_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceDestroy_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceDestroy_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceDestroy_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryA_impl_init) || forceAllToNoops) + 
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateA_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateA_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateW_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateW_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainDestroy_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainDestroy_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainDestroy_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxInitialize_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxInitialize_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxInitialize_impl_fnptr = NULL; + + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserCreate_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserCreate_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserCreate_impl_fnptr = 
NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserDestroy_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserDestroy_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserDestroy_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireStart_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireStart_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireStart_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireFailed_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireFailed_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireFailed_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireSuccess_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireSuccess_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireSuccess_impl_fnptr = NULL; + if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserReleasing_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserReleasing_impl_init) || forceAllToNoops) + NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserReleasing_impl_fnptr = NULL; +} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxLinkOnce.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxLinkOnce.h new file mode 100644 index 0000000000000000000000000000000000000000..6c6eaa732464519fc7d93f98b646729dab0db8b9 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxLinkOnce.h @@ -0,0 +1,75 @@ +#ifndef __NVTX_LINKONCE_H__ +#define __NVTX_LINKONCE_H__ + +/* This header defines macros 
to permit making definitions of global variables + * and functions in C/C++ header files which may be included multiple times in + * a translation unit or linkage unit. It allows authoring header-only libraries + * which can be used by multiple other header-only libraries (either as the same + * copy or multiple copies), and does not require any build changes, such as + * adding another .c file, linking a static library, or deploying a dynamic + * library. Globals defined with these macros have the property that they have + * the same address, pointing to a single instance, for the entire linkage unit. + * It is expected but not guaranteed that each linkage unit will have a separate + * instance. + * + * In some situations it is desirable to declare a variable without initializing + * it, refer to it in code or other variables' initializers, and then initialize + * it later. Similarly, functions can be prototyped, have their address taken, + * and then have their body defined later. In such cases, use the FWDDECL macros + * when forward-declaring LINKONCE global variables without initializers and + * function prototypes, and then use the DEFINE macros when later defining them. + * Although in many cases the FWDDECL macro is equivalent to the DEFINE macro, + * following this pattern makes code maximally portable. 
+ */ + +#if defined(__MINGW32__) /* MinGW */ + #define NVTX_LINKONCE_WEAK __attribute__((section(".gnu.linkonce.0."))) + #if defined(__cplusplus) + #define NVTX_LINKONCE_DEFINE_GLOBAL __declspec(selectany) + #define NVTX_LINKONCE_DEFINE_FUNCTION extern "C" inline NVTX_LINKONCE_WEAK + #else + #define NVTX_LINKONCE_DEFINE_GLOBAL __declspec(selectany) + #define NVTX_LINKONCE_DEFINE_FUNCTION NVTX_LINKONCE_WEAK + #endif +#elif defined(_MSC_VER) /* MSVC */ + #if defined(__cplusplus) + #define NVTX_LINKONCE_DEFINE_GLOBAL extern "C" __declspec(selectany) + #define NVTX_LINKONCE_DEFINE_FUNCTION extern "C" inline + #else + #define NVTX_LINKONCE_DEFINE_GLOBAL __declspec(selectany) + #define NVTX_LINKONCE_DEFINE_FUNCTION __inline + #endif +#elif defined(__CYGWIN__) && defined(__clang__) /* Clang on Cygwin */ + #define NVTX_LINKONCE_WEAK __attribute__((section(".gnu.linkonce.0."))) + #if defined(__cplusplus) + #define NVTX_LINKONCE_DEFINE_GLOBAL NVTX_LINKONCE_WEAK + #define NVTX_LINKONCE_DEFINE_FUNCTION extern "C" NVTX_LINKONCE_WEAK + #else + #define NVTX_LINKONCE_DEFINE_GLOBAL NVTX_LINKONCE_WEAK + #define NVTX_LINKONCE_DEFINE_FUNCTION NVTX_LINKONCE_WEAK + #endif +#elif defined(__CYGWIN__) /* Assume GCC or compatible */ + #define NVTX_LINKONCE_WEAK __attribute__((weak)) + #if defined(__cplusplus) + #define NVTX_LINKONCE_DEFINE_GLOBAL __declspec(selectany) + #define NVTX_LINKONCE_DEFINE_FUNCTION extern "C" inline + #else + #define NVTX_LINKONCE_DEFINE_GLOBAL NVTX_LINKONCE_WEAK + #define NVTX_LINKONCE_DEFINE_FUNCTION NVTX_LINKONCE_WEAK + #endif +#else /* All others: Assume GCC, clang, or compatible */ + #define NVTX_LINKONCE_WEAK __attribute__((weak)) + #define NVTX_LINKONCE_HIDDEN __attribute__((visibility("hidden"))) + #if defined(__cplusplus) + #define NVTX_LINKONCE_DEFINE_GLOBAL NVTX_LINKONCE_HIDDEN NVTX_LINKONCE_WEAK + #define NVTX_LINKONCE_DEFINE_FUNCTION extern "C" NVTX_LINKONCE_HIDDEN inline + #else + #define NVTX_LINKONCE_DEFINE_GLOBAL NVTX_LINKONCE_HIDDEN 
NVTX_LINKONCE_WEAK + #define NVTX_LINKONCE_DEFINE_FUNCTION NVTX_LINKONCE_HIDDEN NVTX_LINKONCE_WEAK + #endif +#endif + +#define NVTX_LINKONCE_FWDDECL_GLOBAL NVTX_LINKONCE_DEFINE_GLOBAL extern +#define NVTX_LINKONCE_FWDDECL_FUNCTION NVTX_LINKONCE_DEFINE_FUNCTION + +#endif /* __NVTX_LINKONCE_H__ */ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxTypes.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxTypes.h new file mode 100644 index 0000000000000000000000000000000000000000..8ddb1a9b3baff99c78b1c9acc06ac0eb64de8d6d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxTypes.h @@ -0,0 +1,333 @@ +/* +* Copyright 2009-2016 NVIDIA Corporation. All rights reserved. +* +* NOTICE TO USER: +* +* This source code is subject to NVIDIA ownership rights under U.S. and +* international Copyright laws. +* +* This software and the information contained herein is PROPRIETARY and +* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions +* of a form of NVIDIA software license agreement. +* +* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +* OR PERFORMANCE OF THIS SOURCE CODE. +* +* U.S. Government End Users. 
This source code is a "commercial item" as +* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +* "commercial computer software" and "commercial computer software +* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +* and is provided to the U.S. Government only as a commercial end item. +* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +* source code with only those rights set forth herein. +* +* Any use of this source code in individual and commercial software must +* include, in the user documentation and internal comments to the code, +* the above Disclaimer and U.S. Government End Users Notice. +*/ + +/* This header defines types which are used by the internal implementation +* of NVTX and callback subscribers. API clients do not use these types, +* so they are defined here instead of in nvToolsExt.h to clarify they are +* not part of the NVTX client API. */ + +#ifndef NVTX_IMPL_GUARD +#error Never include this file directly -- it is automatically included by nvToolsExt.h. +#endif + +/* ------ Dependency-free types binary-compatible with real types ------- */ + +/* In order to avoid having the NVTX core API headers depend on non-NVTX +* headers like cuda.h, NVTX defines binary-compatible types to use for +* safely making the initialization versions of all NVTX functions without +* needing to have definitions for the real types. 
*/ + +typedef int nvtx_CUdevice; +typedef void* nvtx_CUcontext; +typedef void* nvtx_CUstream; +typedef void* nvtx_CUevent; + +typedef void* nvtx_cudaStream_t; +typedef void* nvtx_cudaEvent_t; + +typedef void* nvtx_cl_platform_id; +typedef void* nvtx_cl_device_id; +typedef void* nvtx_cl_context; +typedef void* nvtx_cl_command_queue; +typedef void* nvtx_cl_mem; +typedef void* nvtx_cl_program; +typedef void* nvtx_cl_kernel; +typedef void* nvtx_cl_event; +typedef void* nvtx_cl_sampler; + +typedef struct nvtxSyncUser* nvtxSyncUser_t; +struct nvtxSyncUserAttributes_v0; +typedef struct nvtxSyncUserAttributes_v0 nvtxSyncUserAttributes_t; + +/* --------- Types for function pointers (with fake API types) ---------- */ + +typedef void (NVTX_API * nvtxMarkEx_impl_fntype)(const nvtxEventAttributes_t* eventAttrib); +typedef void (NVTX_API * nvtxMarkA_impl_fntype)(const char* message); +typedef void (NVTX_API * nvtxMarkW_impl_fntype)(const wchar_t* message); +typedef nvtxRangeId_t (NVTX_API * nvtxRangeStartEx_impl_fntype)(const nvtxEventAttributes_t* eventAttrib); +typedef nvtxRangeId_t (NVTX_API * nvtxRangeStartA_impl_fntype)(const char* message); +typedef nvtxRangeId_t (NVTX_API * nvtxRangeStartW_impl_fntype)(const wchar_t* message); +typedef void (NVTX_API * nvtxRangeEnd_impl_fntype)(nvtxRangeId_t id); +typedef int (NVTX_API * nvtxRangePushEx_impl_fntype)(const nvtxEventAttributes_t* eventAttrib); +typedef int (NVTX_API * nvtxRangePushA_impl_fntype)(const char* message); +typedef int (NVTX_API * nvtxRangePushW_impl_fntype)(const wchar_t* message); +typedef int (NVTX_API * nvtxRangePop_impl_fntype)(void); +typedef void (NVTX_API * nvtxNameCategoryA_impl_fntype)(uint32_t category, const char* name); +typedef void (NVTX_API * nvtxNameCategoryW_impl_fntype)(uint32_t category, const wchar_t* name); +typedef void (NVTX_API * nvtxNameOsThreadA_impl_fntype)(uint32_t threadId, const char* name); +typedef void (NVTX_API * nvtxNameOsThreadW_impl_fntype)(uint32_t threadId, const wchar_t* 
name); + +/* Real impl types are defined in nvtxImplCuda_v3.h, where CUDA headers are included */ +typedef void (NVTX_API * nvtxNameCuDeviceA_fakeimpl_fntype)(nvtx_CUdevice device, const char* name); +typedef void (NVTX_API * nvtxNameCuDeviceW_fakeimpl_fntype)(nvtx_CUdevice device, const wchar_t* name); +typedef void (NVTX_API * nvtxNameCuContextA_fakeimpl_fntype)(nvtx_CUcontext context, const char* name); +typedef void (NVTX_API * nvtxNameCuContextW_fakeimpl_fntype)(nvtx_CUcontext context, const wchar_t* name); +typedef void (NVTX_API * nvtxNameCuStreamA_fakeimpl_fntype)(nvtx_CUstream stream, const char* name); +typedef void (NVTX_API * nvtxNameCuStreamW_fakeimpl_fntype)(nvtx_CUstream stream, const wchar_t* name); +typedef void (NVTX_API * nvtxNameCuEventA_fakeimpl_fntype)(nvtx_CUevent event, const char* name); +typedef void (NVTX_API * nvtxNameCuEventW_fakeimpl_fntype)(nvtx_CUevent event, const wchar_t* name); + +/* Real impl types are defined in nvtxImplOpenCL_v3.h, where OPENCL headers are included */ +typedef void (NVTX_API * nvtxNameClDeviceA_fakeimpl_fntype)(nvtx_cl_device_id device, const char* name); +typedef void (NVTX_API * nvtxNameClDeviceW_fakeimpl_fntype)(nvtx_cl_device_id device, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClContextA_fakeimpl_fntype)(nvtx_cl_context context, const char* name); +typedef void (NVTX_API * nvtxNameClContextW_fakeimpl_fntype)(nvtx_cl_context context, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClCommandQueueA_fakeimpl_fntype)(nvtx_cl_command_queue command_queue, const char* name); +typedef void (NVTX_API * nvtxNameClCommandQueueW_fakeimpl_fntype)(nvtx_cl_command_queue command_queue, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClMemObjectA_fakeimpl_fntype)(nvtx_cl_mem memobj, const char* name); +typedef void (NVTX_API * nvtxNameClMemObjectW_fakeimpl_fntype)(nvtx_cl_mem memobj, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClSamplerA_fakeimpl_fntype)(nvtx_cl_sampler sampler, 
const char* name); +typedef void (NVTX_API * nvtxNameClSamplerW_fakeimpl_fntype)(nvtx_cl_sampler sampler, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClProgramA_fakeimpl_fntype)(nvtx_cl_program program, const char* name); +typedef void (NVTX_API * nvtxNameClProgramW_fakeimpl_fntype)(nvtx_cl_program program, const wchar_t* name); +typedef void (NVTX_API * nvtxNameClEventA_fakeimpl_fntype)(nvtx_cl_event evnt, const char* name); +typedef void (NVTX_API * nvtxNameClEventW_fakeimpl_fntype)(nvtx_cl_event evnt, const wchar_t* name); + +/* Real impl types are defined in nvtxImplCudaRt_v3.h, where CUDART headers are included */ +typedef void (NVTX_API * nvtxNameCudaDeviceA_impl_fntype)(int device, const char* name); +typedef void (NVTX_API * nvtxNameCudaDeviceW_impl_fntype)(int device, const wchar_t* name); +typedef void (NVTX_API * nvtxNameCudaStreamA_fakeimpl_fntype)(nvtx_cudaStream_t stream, const char* name); +typedef void (NVTX_API * nvtxNameCudaStreamW_fakeimpl_fntype)(nvtx_cudaStream_t stream, const wchar_t* name); +typedef void (NVTX_API * nvtxNameCudaEventA_fakeimpl_fntype)(nvtx_cudaEvent_t event, const char* name); +typedef void (NVTX_API * nvtxNameCudaEventW_fakeimpl_fntype)(nvtx_cudaEvent_t event, const wchar_t* name); + +typedef void (NVTX_API * nvtxDomainMarkEx_impl_fntype)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib); +typedef nvtxRangeId_t (NVTX_API * nvtxDomainRangeStartEx_impl_fntype)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib); +typedef void (NVTX_API * nvtxDomainRangeEnd_impl_fntype)(nvtxDomainHandle_t domain, nvtxRangeId_t id); +typedef int (NVTX_API * nvtxDomainRangePushEx_impl_fntype)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib); +typedef int (NVTX_API * nvtxDomainRangePop_impl_fntype)(nvtxDomainHandle_t domain); +typedef nvtxResourceHandle_t (NVTX_API * nvtxDomainResourceCreate_impl_fntype)(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs); +typedef void 
(NVTX_API * nvtxDomainResourceDestroy_impl_fntype)(nvtxResourceHandle_t resource); +typedef void (NVTX_API * nvtxDomainNameCategoryA_impl_fntype)(nvtxDomainHandle_t domain, uint32_t category, const char* name); +typedef void (NVTX_API * nvtxDomainNameCategoryW_impl_fntype)(nvtxDomainHandle_t domain, uint32_t category, const wchar_t* name); +typedef nvtxStringHandle_t (NVTX_API * nvtxDomainRegisterStringA_impl_fntype)(nvtxDomainHandle_t domain, const char* string); +typedef nvtxStringHandle_t (NVTX_API * nvtxDomainRegisterStringW_impl_fntype)(nvtxDomainHandle_t domain, const wchar_t* string); +typedef nvtxDomainHandle_t (NVTX_API * nvtxDomainCreateA_impl_fntype)(const char* message); +typedef nvtxDomainHandle_t (NVTX_API * nvtxDomainCreateW_impl_fntype)(const wchar_t* message); +typedef void (NVTX_API * nvtxDomainDestroy_impl_fntype)(nvtxDomainHandle_t domain); +typedef void (NVTX_API * nvtxInitialize_impl_fntype)(const void* reserved); + +typedef nvtxSyncUser_t (NVTX_API * nvtxDomainSyncUserCreate_impl_fntype)(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs); +typedef void (NVTX_API * nvtxDomainSyncUserDestroy_impl_fntype)(nvtxSyncUser_t handle); +typedef void (NVTX_API * nvtxDomainSyncUserAcquireStart_impl_fntype)(nvtxSyncUser_t handle); +typedef void (NVTX_API * nvtxDomainSyncUserAcquireFailed_impl_fntype)(nvtxSyncUser_t handle); +typedef void (NVTX_API * nvtxDomainSyncUserAcquireSuccess_impl_fntype)(nvtxSyncUser_t handle); +typedef void (NVTX_API * nvtxDomainSyncUserReleasing_impl_fntype)(nvtxSyncUser_t handle); + +/* ---------------- Types for callback subscription --------------------- */ + +typedef const void *(NVTX_API * NvtxGetExportTableFunc_t)(uint32_t exportTableId); +typedef int (NVTX_API * NvtxInitializeInjectionNvtxFunc_t)(NvtxGetExportTableFunc_t exportTable); + +typedef enum NvtxCallbackModule +{ + NVTX_CB_MODULE_INVALID = 0, + NVTX_CB_MODULE_CORE = 1, + NVTX_CB_MODULE_CUDA = 2, + NVTX_CB_MODULE_OPENCL = 3, + NVTX_CB_MODULE_CUDART 
= 4, + NVTX_CB_MODULE_CORE2 = 5, + NVTX_CB_MODULE_SYNC = 6, + /* --- New constants must only be added directly above this line --- */ + NVTX_CB_MODULE_SIZE, + NVTX_CB_MODULE_FORCE_INT = 0x7fffffff +} NvtxCallbackModule; + +typedef enum NvtxCallbackIdCore +{ + NVTX_CBID_CORE_INVALID = 0, + NVTX_CBID_CORE_MarkEx = 1, + NVTX_CBID_CORE_MarkA = 2, + NVTX_CBID_CORE_MarkW = 3, + NVTX_CBID_CORE_RangeStartEx = 4, + NVTX_CBID_CORE_RangeStartA = 5, + NVTX_CBID_CORE_RangeStartW = 6, + NVTX_CBID_CORE_RangeEnd = 7, + NVTX_CBID_CORE_RangePushEx = 8, + NVTX_CBID_CORE_RangePushA = 9, + NVTX_CBID_CORE_RangePushW = 10, + NVTX_CBID_CORE_RangePop = 11, + NVTX_CBID_CORE_NameCategoryA = 12, + NVTX_CBID_CORE_NameCategoryW = 13, + NVTX_CBID_CORE_NameOsThreadA = 14, + NVTX_CBID_CORE_NameOsThreadW = 15, + /* --- New constants must only be added directly above this line --- */ + NVTX_CBID_CORE_SIZE, + NVTX_CBID_CORE_FORCE_INT = 0x7fffffff +} NvtxCallbackIdCore; + +typedef enum NvtxCallbackIdCore2 +{ + NVTX_CBID_CORE2_INVALID = 0, + NVTX_CBID_CORE2_DomainMarkEx = 1, + NVTX_CBID_CORE2_DomainRangeStartEx = 2, + NVTX_CBID_CORE2_DomainRangeEnd = 3, + NVTX_CBID_CORE2_DomainRangePushEx = 4, + NVTX_CBID_CORE2_DomainRangePop = 5, + NVTX_CBID_CORE2_DomainResourceCreate = 6, + NVTX_CBID_CORE2_DomainResourceDestroy = 7, + NVTX_CBID_CORE2_DomainNameCategoryA = 8, + NVTX_CBID_CORE2_DomainNameCategoryW = 9, + NVTX_CBID_CORE2_DomainRegisterStringA = 10, + NVTX_CBID_CORE2_DomainRegisterStringW = 11, + NVTX_CBID_CORE2_DomainCreateA = 12, + NVTX_CBID_CORE2_DomainCreateW = 13, + NVTX_CBID_CORE2_DomainDestroy = 14, + NVTX_CBID_CORE2_Initialize = 15, + /* --- New constants must only be added directly above this line --- */ + NVTX_CBID_CORE2_SIZE, + NVTX_CBID_CORE2_FORCE_INT = 0x7fffffff +} NvtxCallbackIdCore2; + +typedef enum NvtxCallbackIdCuda +{ + NVTX_CBID_CUDA_INVALID = 0, + NVTX_CBID_CUDA_NameCuDeviceA = 1, + NVTX_CBID_CUDA_NameCuDeviceW = 2, + NVTX_CBID_CUDA_NameCuContextA = 3, + 
NVTX_CBID_CUDA_NameCuContextW = 4, + NVTX_CBID_CUDA_NameCuStreamA = 5, + NVTX_CBID_CUDA_NameCuStreamW = 6, + NVTX_CBID_CUDA_NameCuEventA = 7, + NVTX_CBID_CUDA_NameCuEventW = 8, + /* --- New constants must only be added directly above this line --- */ + NVTX_CBID_CUDA_SIZE, + NVTX_CBID_CUDA_FORCE_INT = 0x7fffffff +} NvtxCallbackIdCuda; + +typedef enum NvtxCallbackIdCudaRt +{ + NVTX_CBID_CUDART_INVALID = 0, + NVTX_CBID_CUDART_NameCudaDeviceA = 1, + NVTX_CBID_CUDART_NameCudaDeviceW = 2, + NVTX_CBID_CUDART_NameCudaStreamA = 3, + NVTX_CBID_CUDART_NameCudaStreamW = 4, + NVTX_CBID_CUDART_NameCudaEventA = 5, + NVTX_CBID_CUDART_NameCudaEventW = 6, + /* --- New constants must only be added directly above this line --- */ + NVTX_CBID_CUDART_SIZE, + NVTX_CBID_CUDART_FORCE_INT = 0x7fffffff +} NvtxCallbackIdCudaRt; + +typedef enum NvtxCallbackIdOpenCL +{ + NVTX_CBID_OPENCL_INVALID = 0, + NVTX_CBID_OPENCL_NameClDeviceA = 1, + NVTX_CBID_OPENCL_NameClDeviceW = 2, + NVTX_CBID_OPENCL_NameClContextA = 3, + NVTX_CBID_OPENCL_NameClContextW = 4, + NVTX_CBID_OPENCL_NameClCommandQueueA = 5, + NVTX_CBID_OPENCL_NameClCommandQueueW = 6, + NVTX_CBID_OPENCL_NameClMemObjectA = 7, + NVTX_CBID_OPENCL_NameClMemObjectW = 8, + NVTX_CBID_OPENCL_NameClSamplerA = 9, + NVTX_CBID_OPENCL_NameClSamplerW = 10, + NVTX_CBID_OPENCL_NameClProgramA = 11, + NVTX_CBID_OPENCL_NameClProgramW = 12, + NVTX_CBID_OPENCL_NameClEventA = 13, + NVTX_CBID_OPENCL_NameClEventW = 14, + /* --- New constants must only be added directly above this line --- */ + NVTX_CBID_OPENCL_SIZE, + NVTX_CBID_OPENCL_FORCE_INT = 0x7fffffff +} NvtxCallbackIdOpenCL; + +typedef enum NvtxCallbackIdSync +{ + NVTX_CBID_SYNC_INVALID = 0, + NVTX_CBID_SYNC_DomainSyncUserCreate = 1, + NVTX_CBID_SYNC_DomainSyncUserDestroy = 2, + NVTX_CBID_SYNC_DomainSyncUserAcquireStart = 3, + NVTX_CBID_SYNC_DomainSyncUserAcquireFailed = 4, + NVTX_CBID_SYNC_DomainSyncUserAcquireSuccess = 5, + NVTX_CBID_SYNC_DomainSyncUserReleasing = 6, + /* --- New constants must only be 
added directly above this line --- */ + NVTX_CBID_SYNC_SIZE, + NVTX_CBID_SYNC_FORCE_INT = 0x7fffffff +} NvtxCallbackIdSync; + +/* IDs for NVTX Export Tables */ +typedef enum NvtxExportTableID +{ + NVTX_ETID_INVALID = 0, + NVTX_ETID_CALLBACKS = 1, + NVTX_ETID_RESERVED0 = 2, + NVTX_ETID_VERSIONINFO = 3, + /* --- New constants must only be added directly above this line --- */ + NVTX_ETID_SIZE, + NVTX_ETID_FORCE_INT = 0x7fffffff +} NvtxExportTableID; + +typedef void (* NvtxFunctionPointer)(void); /* generic uncallable function pointer, must be casted to appropriate function type */ +typedef NvtxFunctionPointer** NvtxFunctionTable; /* double pointer because array(1) of pointers(2) to function pointers */ + +typedef struct NvtxExportTableCallbacks +{ + size_t struct_size; + + /* returns an array of pointer to function pointers*/ + int (NVTX_API *GetModuleFunctionTable)( + NvtxCallbackModule module, + NvtxFunctionTable* out_table, + unsigned int* out_size); +} NvtxExportTableCallbacks; + +typedef struct NvtxExportTableVersionInfo +{ + /* sizeof(NvtxExportTableVersionInfo) */ + size_t struct_size; + + /* The API version comes from the NVTX library linked to the app. The + * injection library is can use this info to make some assumptions */ + uint32_t version; + + /* Reserved for alignment, do not use */ + uint32_t reserved0; + + /* This must be set by tools when attaching to provide applications + * the ability to, in emergency situations, detect problematic tools + * versions and modify the NVTX source to prevent attaching anything + * that causes trouble in the app. Currently, this value is ignored. 
*/ + void (NVTX_API *SetInjectionNvtxVersion)( + uint32_t version); +} NvtxExportTableVersionInfo; + + + + + + + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/adjacent_difference.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/adjacent_difference.h new file mode 100644 index 0000000000000000000000000000000000000000..e8385c240dd9a3466df10a73ba0adb532266d42c --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/adjacent_difference.h @@ -0,0 +1,244 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file adjacent_difference.h + * \brief Compute difference between consecutive elements of a range + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup transformations Transformations + * \{ + */ + + +/*! \p adjacent_difference calculates the differences of adjacent elements in the + * range [first, last). That is, \*first is assigned to + * \*result, and, for each iterator \p i in the range + * [first + 1, last), the difference of \*i and *(i - 1) + * is assigned to \*(result + (i - first)). + * + * This version of \p adjacent_difference uses operator- to calculate + * differences. 
+ * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param result The beginning of the output range. + * \return The iterator result + (last - first) + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \c x and \c y are objects of \p InputIterator's \c value_type, then \c x - \c is defined, + * and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types, + * and the return type of x - y is convertible to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * \remark Note that \p result is permitted to be the same iterator as \p first. This is + * useful for computing differences "in place". + * + * The following code snippet demonstrates how to use \p adjacent_difference to compute + * the difference between adjacent elements of a range using the \p thrust::device execution policy: + * + * \code + * #include + * #include + * #include + * ... + * int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2}; + * thrust::device_vector d_data(h_data, h_data + 8); + * thrust::device_vector d_result(8); + * + * thrust::adjacent_difference(thrust::device, d_data.begin(), d_data.end(), d_result.begin()); + * + * // d_result is now [1, 1, -1, 1, -1, 1, -1, 1] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference + * \see inclusive_scan + */ +template +__host__ __device__ +OutputIterator adjacent_difference(const thrust::detail::execution_policy_base &exec, + InputIterator first, InputIterator last, + OutputIterator result); + +/*! \p adjacent_difference calculates the differences of adjacent elements in the + * range [first, last). 
That is, *first is assigned to + * \*result, and, for each iterator \p i in the range + * [first + 1, last), binary_op(\*i, \*(i - 1)) is assigned to + * \*(result + (i - first)). + * + * This version of \p adjacent_difference uses the binary function \p binary_op to + * calculate differences. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param result The beginning of the output range. + * \param binary_op The binary function used to compute differences. + * \return The iterator result + (last - first) + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type, + * and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam BinaryFunction's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types. + * + * \remark Note that \p result is permitted to be the same iterator as \p first. This is + * useful for computing differences "in place". + * + * The following code snippet demonstrates how to use \p adjacent_difference to compute + * the sum between adjacent elements of a range using the \p thrust::device execution policy: + * + * \code + * #include + * #include + * #include + * #include + * ... 
+ * int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2}; + * thrust::device_vector d_data(h_data, h_data + 8); + * thrust::device_vector d_result(8); + * + * thrust::adjacent_difference(thrust::device, d_data.begin(), d_data.end(), d_result.begin(), thrust::plus()); + * + * // d_result is now [1, 3, 3, 3, 3, 3, 3, 3] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference + * \see inclusive_scan + */ +template +__host__ __device__ +OutputIterator adjacent_difference(const thrust::detail::execution_policy_base &exec, + InputIterator first, InputIterator last, + OutputIterator result, + BinaryFunction binary_op); + +/*! \p adjacent_difference calculates the differences of adjacent elements in the + * range [first, last). That is, \*first is assigned to + * \*result, and, for each iterator \p i in the range + * [first + 1, last), the difference of \*i and *(i - 1) + * is assigned to \*(result + (i - first)). + * + * This version of \p adjacent_difference uses operator- to calculate + * differences. + * + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param result The beginning of the output range. + * \return The iterator result + (last - first) + * + * \tparam InputIterator is a model of Input Iterator, + * and \c x and \c y are objects of \p InputIterator's \c value_type, then \c x - \c is defined, + * and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types, + * and the return type of x - y is convertible to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * \remark Note that \p result is permitted to be the same iterator as \p first. This is + * useful for computing differences "in place". + * + * The following code snippet demonstrates how to use \p adjacent_difference to compute + * the difference between adjacent elements of a range. 
+ * + * \code + * #include + * #include + * ... + * int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2}; + * thrust::device_vector d_data(h_data, h_data + 8); + * thrust::device_vector d_result(8); + * + * thrust::adjacent_difference(d_data.begin(), d_data.end(), d_result.begin()); + * + * // d_result is now [1, 1, -1, 1, -1, 1, -1, 1] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference + * \see inclusive_scan + */ +template +OutputIterator adjacent_difference(InputIterator first, InputIterator last, + OutputIterator result); + +/*! \p adjacent_difference calculates the differences of adjacent elements in the + * range [first, last). That is, *first is assigned to + * \*result, and, for each iterator \p i in the range + * [first + 1, last), binary_op(\*i, \*(i - 1)) is assigned to + * \*(result + (i - first)). + * + * This version of \p adjacent_difference uses the binary function \p binary_op to + * calculate differences. + * + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param result The beginning of the output range. + * \param binary_op The binary function used to compute differences. + * \return The iterator result + (last - first) + * + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type, + * and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam BinaryFunction's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types. + * + * \remark Note that \p result is permitted to be the same iterator as \p first. This is + * useful for computing differences "in place". + * + * The following code snippet demonstrates how to use \p adjacent_difference to compute + * the sum between adjacent elements of a range. 
+ * + * \code + * #include + * #include + * #include + * ... + * int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2}; + * thrust::device_vector d_data(h_data, h_data + 8); + * thrust::device_vector d_result(8); + * + * thrust::adjacent_difference(d_data.begin(), d_data.end(), d_result.begin(), thrust::plus()); + * + * // d_result is now [1, 3, 3, 3, 3, 3, 3, 3] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference + * \see inclusive_scan + */ +template +OutputIterator adjacent_difference(InputIterator first, InputIterator last, + OutputIterator result, + BinaryFunction binary_op); + +/*! \} + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/allocate_unique.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/allocate_unique.h new file mode 100644 index 0000000000000000000000000000000000000000..ff10cb51cd19c74cf53c75ef281c7dd5fc1298a8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/allocate_unique.h @@ -0,0 +1,443 @@ +// Copyright (c) 2018 NVIDIA Corporation +// Author: Bryce Adelstein Lelbach +// +// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt) + +#pragma once + +#include +#include + +#if THRUST_CPP_DIALECT >= 2011 + +#include +#include +#include +#include + +#include +#include + +THRUST_NAMESPACE_BEGIN + +// wg21.link/p0316r0 + +/////////////////////////////////////////////////////////////////////////////// + +namespace detail +{ + +template +void allocator_delete_impl( + Allocator const& alloc, Pointer p, std::false_type +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >; + + typename traits::allocator_type alloc_T(alloc); + + if (nullptr != pointer_traits::get(p)) 
+ { + traits::destroy(alloc_T, thrust::raw_pointer_cast(p)); + traits::deallocate(alloc_T, p, 1); + } +} + +template +void allocator_delete_impl( + Allocator const& alloc, Pointer p, std::true_type +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >; + + typename traits::allocator_type alloc_T(alloc); + + if (nullptr != pointer_traits::get(p)) + { + traits::deallocate(alloc_T, p, 1); + } +} + +} // namespace detail + +template +struct allocator_delete final +{ + using allocator_type + = typename std::remove_cv< + typename std::remove_reference::type + >::type::template rebind::other; + using pointer = typename detail::allocator_traits::pointer; + + template + allocator_delete(UAllocator&& other) noexcept + : alloc_(THRUST_FWD(other)) + {} + + template + allocator_delete( + allocator_delete const& other + ) noexcept + : alloc_(other.get_allocator()) + {} + template + allocator_delete( + allocator_delete&& other + ) noexcept + : alloc_(std::move(other.get_allocator())) + {} + + template + allocator_delete& operator=( + allocator_delete const& other + ) noexcept + { + alloc_ = other.get_allocator(); + return *this; + } + template + allocator_delete& operator=( + allocator_delete&& other + ) noexcept + { + alloc_ = std::move(other.get_allocator()); + return *this; + } + + void operator()(pointer p) + { + std::integral_constant ic; + + detail::allocator_delete_impl(get_allocator(), p, ic); + } + + allocator_type& get_allocator() noexcept { return alloc_; } + allocator_type const& get_allocator() const noexcept { return alloc_; } + + void swap(allocator_delete& other) noexcept + { + using std::swap; + swap(alloc_, other.alloc_); + } + +private: + allocator_type alloc_; +}; + +template +using uninitialized_allocator_delete = allocator_delete; + +namespace detail { + +template +void array_allocator_delete_impl( + Allocator const& alloc, Pointer p, Size count, std::false_type +) +{ + using 
traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >; + + typename traits::allocator_type alloc_T(alloc); + + if (nullptr != pointer_traits::get(p)) + { + destroy_n(alloc_T, p, count); + traits::deallocate(alloc_T, p, count); + } +} + +template +void array_allocator_delete_impl( + Allocator const& alloc, Pointer p, Size count, std::true_type +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >; + + typename traits::allocator_type alloc_T(alloc); + + if (nullptr != pointer_traits::get(p)) + { + traits::deallocate(alloc_T, p, count); + } +} + +} // namespace detail + +template +struct array_allocator_delete final +{ + using allocator_type + = typename std::remove_cv< + typename std::remove_reference::type + >::type::template rebind::other; + using pointer = typename detail::allocator_traits::pointer; + + template + array_allocator_delete(UAllocator&& other, std::size_t n) noexcept + : alloc_(THRUST_FWD(other)), count_(n) + {} + + template + array_allocator_delete( + array_allocator_delete const& other + ) noexcept + : alloc_(other.get_allocator()), count_(other.count_) + {} + template + array_allocator_delete( + array_allocator_delete&& other + ) noexcept + : alloc_(std::move(other.get_allocator())), count_(other.count_) + {} + + template + array_allocator_delete& operator=( + array_allocator_delete const& other + ) noexcept + { + alloc_ = other.get_allocator(); + count_ = other.count_; + return *this; + } + template + array_allocator_delete& operator=( + array_allocator_delete&& other + ) noexcept + { + alloc_ = std::move(other.get_allocator()); + count_ = other.count_; + return *this; + } + + void operator()(pointer p) + { + std::integral_constant ic; + + detail::array_allocator_delete_impl(get_allocator(), p, count_, ic); + } + + allocator_type& get_allocator() noexcept { return alloc_; } + allocator_type 
const& get_allocator() const noexcept { return alloc_; } + + void swap(array_allocator_delete& other) noexcept + { + using std::swap; + swap(alloc_, other.alloc_); + swap(count_, other.count_); + } + +private: + allocator_type alloc_; + std::size_t count_; +}; + +template +using uninitialized_array_allocator_delete + = array_allocator_delete; + +/////////////////////////////////////////////////////////////////////////////// + +template +struct tagged_deleter : Lambda +{ + __host__ __device__ + tagged_deleter(Lambda&& l) : Lambda(THRUST_FWD(l)) {} + + using pointer = Pointer; +}; + +template +__host__ __device__ +tagged_deleter +make_tagged_deleter(Lambda&& l) +{ + return tagged_deleter(THRUST_FWD(l)); +} + +/////////////////////////////////////////////////////////////////////////////// + +template +__host__ +std::unique_ptr< + T, + allocator_delete< + T + , typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits::allocator_type + > +> +allocate_unique( + Allocator const& alloc, Args&&... args +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits; + + typename traits::allocator_type alloc_T(alloc); + + auto hold_deleter = make_tagged_deleter( + [&alloc_T] (typename traits::pointer p) { + traits::deallocate(alloc_T, p, 1); + } + ); + using hold_t = std::unique_ptr; + auto hold = hold_t(traits::allocate(alloc_T, 1), hold_deleter); + + traits::construct( + alloc_T, thrust::raw_pointer_cast(hold.get()), THRUST_FWD(args)... 
+ ); + auto deleter = allocator_delete(alloc); + return std::unique_ptr + (hold.release(), std::move(deleter)); +} + +template +__host__ +std::unique_ptr< + T, + uninitialized_allocator_delete< + T + , typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits::allocator_type + > +> +uninitialized_allocate_unique( + Allocator const& alloc +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits; + + typename traits::allocator_type alloc_T(alloc); + + auto hold_deleter = make_tagged_deleter( + [&alloc_T] (typename traits::pointer p) { + traits::deallocate(alloc_T, p, 1); + } + ); + using hold_t = std::unique_ptr; + auto hold = hold_t(traits::allocate(alloc_T, 1), hold_deleter); + + auto deleter = uninitialized_allocator_delete< + T, typename traits::allocator_type + >(alloc_T); + return std::unique_ptr + (hold.release(), std::move(deleter)); +} + +template +__host__ +std::unique_ptr< + T[], + array_allocator_delete< + T + , typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits::allocator_type + > +> +allocate_unique_n( + Allocator const& alloc, Size n, Args&&... args +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits; + + typename traits::allocator_type alloc_T(alloc); + + auto hold_deleter = make_tagged_deleter( + [n, &alloc_T] (typename traits::pointer p) { + traits::deallocate(alloc_T, p, n); + } + ); + using hold_t = std::unique_ptr; + auto hold = hold_t(traits::allocate(alloc_T, n), hold_deleter); + + uninitialized_construct_n_with_allocator( + alloc_T, hold.get(), n, THRUST_FWD(args)... 
+ ); + auto deleter = array_allocator_delete< + T, typename traits::allocator_type + >(alloc_T, n); + return std::unique_ptr + (hold.release(), std::move(deleter)); +} + +template +__host__ +std::unique_ptr< + T[], + uninitialized_array_allocator_delete< + T + , typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits::allocator_type + > +> +uninitialized_allocate_unique_n( + Allocator const& alloc, Size n +) +{ + using traits = typename detail::allocator_traits< + typename std::remove_cv< + typename std::remove_reference::type + >::type + >::template rebind_traits; + + typename traits::allocator_type alloc_T(alloc); + + auto hold_deleter = make_tagged_deleter( + [n, &alloc_T] (typename traits::pointer p) { + traits::deallocate(alloc_T, p, n); + } + ); + using hold_t = std::unique_ptr; + auto hold = hold_t(traits::allocate(alloc_T, n), hold_deleter); + + auto deleter = uninitialized_array_allocator_delete< + T, typename traits::allocator_type + >(alloc_T, n); + return std::unique_ptr + (hold.release(), std::move(deleter)); +} + +/////////////////////////////////////////////////////////////////////////////// + +THRUST_NAMESPACE_END + +#endif // THRUST_CPP_DIALECT >= 2011 + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/binary_search.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/binary_search.h new file mode 100644 index 0000000000000000000000000000000000000000..7a4746e0b165a7da2f86e04e843ee43a2086e4cc --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/binary_search.h @@ -0,0 +1,1899 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file binary_search.h + * \brief Search for values in sorted ranges. + */ + +#pragma once + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup algorithms + */ + + +/*! \addtogroup searching + * \ingroup algorithms + * \{ + */ + + +/*! \addtogroup binary_search Binary Search + * \ingroup searching + * \{ + */ + + +////////////////////// +// Scalar Functions // +////////////////////// + + +/*! \p lower_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the first position where value could be + * inserted without violating the ordering. This version of + * \p lower_bound uses operator< for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), *j < value. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return The furthermost iterator \c i, such that *i < value. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. 
+ * + * The following code snippet demonstrates how to use \p lower_bound + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 0); // returns input.begin() + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 1); // returns input.begin() + 1 + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 2); // returns input.begin() + 1 + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 3); // returns input.begin() + 2 + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 8); // returns input.begin() + 4 + * thrust::lower_bound(thrust::device, input.begin(), input.end(), 9); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +ForwardIterator lower_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const LessThanComparable &value); + + +/*! \p lower_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the first position where value could be + * inserted without violating the ordering. This version of + * \p lower_bound uses operator< for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), *j < value. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return The furthermost iterator \c i, such that *i < value. 
+ * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p lower_bound + * to search for values in a ordered range. + * + * \code + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::lower_bound(input.begin(), input.end(), 0); // returns input.begin() + * thrust::lower_bound(input.begin(), input.end(), 1); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 2); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 3); // returns input.begin() + 2 + * thrust::lower_bound(input.begin(), input.end(), 8); // returns input.begin() + 4 + * thrust::lower_bound(input.begin(), input.end(), 9); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +ForwardIterator lower_bound(ForwardIterator first, + ForwardIterator last, + const LessThanComparable& value); + + +/*! \p lower_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the first position where value could be + * inserted without violating the ordering. This version of + * \p lower_bound uses function object \c comp for comparison + * and returns the furthermost iterator \c i in [first, last) + * such that, for every iterator \c j in [first, i), + * comp(*j, value) is \c true. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. 
+ * \param comp The comparison operator. + * \return The furthermost iterator \c i, such that comp(*i, value) is \c true. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * The following code snippet demonstrates how to use \p lower_bound + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::lower_bound(input.begin(), input.end(), 0, thrust::less()); // returns input.begin() + * thrust::lower_bound(input.begin(), input.end(), 1, thrust::less()); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 2, thrust::less()); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 3, thrust::less()); // returns input.begin() + 2 + * thrust::lower_bound(input.begin(), input.end(), 8, thrust::less()); // returns input.begin() + 4 + * thrust::lower_bound(input.begin(), input.end(), 9, thrust::less()); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +ForwardIterator lower_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const T &value, + StrictWeakOrdering comp); + + +/*! \p lower_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the first position where value could be + * inserted without violating the ordering. 
This version of + * \p lower_bound uses function object \c comp for comparison + * and returns the furthermost iterator \c i in [first, last) + * such that, for every iterator \c j in [first, i), + * comp(*j, value) is \c true. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return The furthermost iterator \c i, such that comp(*i, value) is \c true. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * The following code snippet demonstrates how to use \p lower_bound + * to search for values in a ordered range. + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::lower_bound(input.begin(), input.end(), 0, thrust::less()); // returns input.begin() + * thrust::lower_bound(input.begin(), input.end(), 1, thrust::less()); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 2, thrust::less()); // returns input.begin() + 1 + * thrust::lower_bound(input.begin(), input.end(), 3, thrust::less()); // returns input.begin() + 2 + * thrust::lower_bound(input.begin(), input.end(), 8, thrust::less()); // returns input.begin() + 4 + * thrust::lower_bound(input.begin(), input.end(), 9, thrust::less()); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +ForwardIterator lower_bound(ForwardIterator first, + ForwardIterator last, + const T& value, + StrictWeakOrdering comp); + + +/*! 
\p upper_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the last position where value could be + * inserted without violating the ordering. This version of + * \p upper_bound uses operator< for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), value < *j + * is \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return The furthermost iterator \c i, such that value < *i is \c false. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p upper_bound + * to search for values in a ordered range using the \p thrust::device execution policy for parallelism: + * + * \code + * #include + * #include + * #include + * ... 
+ * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 0); // returns input.begin() + 1 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 1); // returns input.begin() + 1 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 2); // returns input.begin() + 2 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 3); // returns input.begin() + 2 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 8); // returns input.end() + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 9); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p lower_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +ForwardIterator upper_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const LessThanComparable &value); + + +/*! \p upper_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the last position where value could be + * inserted without violating the ordering. This version of + * \p upper_bound uses operator< for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), value < *j + * is \c false. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return The furthermost iterator \c i, such that value < *i is \c false. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. 
+ * + * The following code snippet demonstrates how to use \p upper_bound + * to search for values in a ordered range. + * + * \code + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::upper_bound(input.begin(), input.end(), 0); // returns input.begin() + 1 + * thrust::upper_bound(input.begin(), input.end(), 1); // returns input.begin() + 1 + * thrust::upper_bound(input.begin(), input.end(), 2); // returns input.begin() + 2 + * thrust::upper_bound(input.begin(), input.end(), 3); // returns input.begin() + 2 + * thrust::upper_bound(input.begin(), input.end(), 8); // returns input.end() + * thrust::upper_bound(input.begin(), input.end(), 9); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p lower_bound + * \see \p equal_range + * \see \p binary_search + */ +template +ForwardIterator upper_bound(ForwardIterator first, + ForwardIterator last, + const LessThanComparable& value); + + +/*! \p upper_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the last position where value could be + * inserted without violating the ordering. This version of + * \p upper_bound uses function object \c comp for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), comp(value, *j) + * is \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return The furthermost iterator \c i, such that comp(value, *i) is \c false. 
+ * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * The following code snippet demonstrates how to use \p upper_bound + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 0, thrust::less()); // returns input.begin() + 1 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 1, thrust::less()); // returns input.begin() + 1 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 2, thrust::less()); // returns input.begin() + 2 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 3, thrust::less()); // returns input.begin() + 2 + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 8, thrust::less()); // returns input.end() + * thrust::upper_bound(thrust::device, input.begin(), input.end(), 9, thrust::less()); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p lower_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +ForwardIterator upper_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const T &value, + StrictWeakOrdering comp); + +/*! \p upper_bound is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * Specifically, it returns the last position where value could be + * inserted without violating the ordering. 
This version of + * \p upper_bound uses function object \c comp for comparison and returns + * the furthermost iterator \c i in [first, last) such that, + * for every iterator \c j in [first, i), comp(value, *j) + * is \c false. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return The furthermost iterator \c i, such that comp(value, *i) is \c false. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * The following code snippet demonstrates how to use \p upper_bound + * to search for values in a ordered range. + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::upper_bound(input.begin(), input.end(), 0, thrust::less()); // returns input.begin() + 1 + * thrust::upper_bound(input.begin(), input.end(), 1, thrust::less()); // returns input.begin() + 1 + * thrust::upper_bound(input.begin(), input.end(), 2, thrust::less()); // returns input.begin() + 2 + * thrust::upper_bound(input.begin(), input.end(), 3, thrust::less()); // returns input.begin() + 2 + * thrust::upper_bound(input.begin(), input.end(), 8, thrust::less()); // returns input.end() + * thrust::upper_bound(input.begin(), input.end(), 9, thrust::less()); // returns input.end() + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p lower_bound + * \see \p equal_range + * \see \p binary_search + */ +template +ForwardIterator upper_bound(ForwardIterator first, + ForwardIterator last, + const T& value, + StrictWeakOrdering comp); + + +/*! 
\p binary_search is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * It returns \c true if an element that is equivalent to \c value + * is present in [first, last) and \c false if no such element + * exists. Specifically, this version returns \c true if and only if + * there exists an iterator \c i in [first, last) such that + * *i < value and value < *i are both \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return \c true if an equivalent element exists in [first, last), otherwise \c false. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p binary_search + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::binary_search(thrust::device, input.begin(), input.end(), 0); // returns true + * thrust::binary_search(thrust::device, input.begin(), input.end(), 1); // returns false + * thrust::binary_search(thrust::device, input.begin(), input.end(), 2); // returns true + * thrust::binary_search(thrust::device, input.begin(), input.end(), 3); // returns false + * thrust::binary_search(thrust::device, input.begin(), input.end(), 8); // returns true + * thrust::binary_search(thrust::device, input.begin(), input.end(), 9); // returns false + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/binary_search + * \see \p lower_bound + * \see \p upper_bound + * \see \p equal_range + */ +template +__host__ __device__ +bool binary_search(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const LessThanComparable& value); + + +/*! \p binary_search is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * It returns \c true if an element that is equivalent to \c value + * is present in [first, last) and \c false if no such element + * exists. Specifically, this version returns \c true if and only if + * there exists an iterator \c i in [first, last) such that + * *i < value and value < *i are both \c false. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return \c true if an equivalent element exists in [first, last), otherwise \c false. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p binary_search + * to search for values in a ordered range. 
+ * + * \code + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::binary_search(input.begin(), input.end(), 0); // returns true + * thrust::binary_search(input.begin(), input.end(), 1); // returns false + * thrust::binary_search(input.begin(), input.end(), 2); // returns true + * thrust::binary_search(input.begin(), input.end(), 3); // returns false + * thrust::binary_search(input.begin(), input.end(), 8); // returns true + * thrust::binary_search(input.begin(), input.end(), 9); // returns false + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/binary_search + * \see \p lower_bound + * \see \p upper_bound + * \see \p equal_range + */ +template +bool binary_search(ForwardIterator first, + ForwardIterator last, + const LessThanComparable& value); + + +/*! \p binary_search is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * It returns \c true if an element that is equivalent to \c value + * is present in [first, last) and \c false if no such element + * exists. Specifically, this version returns \c true if and only if + * there exists an iterator \c i in [first, last) such that + * comp(*i, value) and comp(value, *i) are both \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return \c true if an equivalent element exists in [first, last), otherwise \c false. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. 
+ * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * The following code snippet demonstrates how to use \p binary_search + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::binary_search(thrust::device, input.begin(), input.end(), 0, thrust::less()); // returns true + * thrust::binary_search(thrust::device, input.begin(), input.end(), 1, thrust::less()); // returns false + * thrust::binary_search(thrust::device, input.begin(), input.end(), 2, thrust::less()); // returns true + * thrust::binary_search(thrust::device, input.begin(), input.end(), 3, thrust::less()); // returns false + * thrust::binary_search(thrust::device, input.begin(), input.end(), 8, thrust::less()); // returns true + * thrust::binary_search(thrust::device, input.begin(), input.end(), 9, thrust::less()); // returns false + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/binary_search + * \see \p lower_bound + * \see \p upper_bound + * \see \p equal_range + */ +template +__host__ __device__ +bool binary_search(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const T& value, + StrictWeakOrdering comp); + + +/*! \p binary_search is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). + * It returns \c true if an element that is equivalent to \c value + * is present in [first, last) and \c false if no such element + * exists. Specifically, this version returns \c true if and only if + * there exists an iterator \c i in [first, last) such that + * comp(*i, value) and comp(value, *i) are both \c false. + * + * \param first The beginning of the ordered sequence. 
+ * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return \c true if an equivalent element exists in [first, last), otherwise \c false. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * The following code snippet demonstrates how to use \p binary_search + * to search for values in a ordered range. + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::binary_search(input.begin(), input.end(), 0, thrust::less()); // returns true + * thrust::binary_search(input.begin(), input.end(), 1, thrust::less()); // returns false + * thrust::binary_search(input.begin(), input.end(), 2, thrust::less()); // returns true + * thrust::binary_search(input.begin(), input.end(), 3, thrust::less()); // returns false + * thrust::binary_search(input.begin(), input.end(), 8, thrust::less()); // returns true + * thrust::binary_search(input.begin(), input.end(), 9, thrust::less()); // returns false + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/binary_search + * \see \p lower_bound + * \see \p upper_bound + * \see \p equal_range + */ +template +bool binary_search(ForwardIterator first, + ForwardIterator last, + const T& value, + StrictWeakOrdering comp); + + +/*! \p equal_range is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). 
The + * value returned by \p equal_range is essentially a combination of + * the values returned by \p lower_bound and \p upper_bound: it returns + * a \p pair of iterators \c i and \c j such that \c i is the first + * position where value could be inserted without violating the + * ordering and \c j is the last position where value could be inserted + * without violating the ordering. It follows that every element in the + * range [i, j) is equivalent to value, and that + * [i, j) is the largest subrange of [first, last) that + * has this property. + * + * This version of \p equal_range returns a \p pair of iterators + * [i, j), where \c i is the furthermost iterator in + * [first, last) such that, for every iterator \c k in + * [first, i), *k < value. \c j is the furthermost + * iterator in [first, last) such that, for every iterator + * \c k in [first, j), value < *k is \c false. + * For every iterator \c k in [i, j), neither + * value < *k nor *k < value is \c true. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return A \p pair of iterators [i, j) that define the range of equivalent elements. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p equal_range + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 0); // returns [input.begin(), input.begin() + 1)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 1); // returns [input.begin() + 1, input.begin() + 1)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 2); // returns [input.begin() + 1, input.begin() + 2)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 3); // returns [input.begin() + 2, input.begin() + 2)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 8); // returns [input.begin() + 4, input.end)
+ * thrust::equal_range(thrust::device, input.begin(), input.end(), 9); // returns [input.end(), input.end)
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/equal_range
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p binary_search
+ */
+template <typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable>
+__host__ __device__
+thrust::pair<ForwardIterator, ForwardIterator>
+equal_range(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+            ForwardIterator first,
+            ForwardIterator last,
+            const LessThanComparable& value);
+
+
+/*! \p equal_range is a version of binary search: it attempts to find
+ * the element value in an ordered range [first, last). The
+ * value returned by \p equal_range is essentially a combination of
+ * the values returned by \p lower_bound and \p upper_bound: it returns
+ * a \p pair of iterators \c i and \c j such that \c i is the first
+ * position where value could be inserted without violating the
+ * ordering and \c j is the last position where value could be inserted
+ * without violating the ordering. It follows that every element in the
+ * range [i, j) is equivalent to value, and that
+ * [i, j) is the largest subrange of [first, last) that
+ * has this property.
+ * + * This version of \p equal_range returns a \p pair of iterators + * [i, j), where \c i is the furthermost iterator in + * [first, last) such that, for every iterator \c k in + * [first, i), *k < value. \c j is the furthermost + * iterator in [first, last) such that, for every iterator + * \c k in [first, j), value < *k is \c false. + * For every iterator \c k in [i, j), neither + * value < *k nor *k < value is \c true. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \return A \p pair of iterators [i, j) that define the range of equivalent elements. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam LessThanComparable is a model of LessThanComparable. + * + * The following code snippet demonstrates how to use \p equal_range + * to search for values in a ordered range. + * + * \code + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::equal_range(input.begin(), input.end(), 0); // returns [input.begin(), input.begin() + 1) + * thrust::equal_range(input.begin(), input.end(), 1); // returns [input.begin() + 1, input.begin() + 1) + * thrust::equal_range(input.begin(), input.end(), 2); // returns [input.begin() + 1, input.begin() + 2) + * thrust::equal_range(input.begin(), input.end(), 3); // returns [input.begin() + 2, input.begin() + 2) + * thrust::equal_range(input.begin(), input.end(), 8); // returns [input.begin() + 4, input.end) + * thrust::equal_range(input.begin(), input.end(), 9); // returns [input.end(), input.end) + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/equal_range + * \see \p lower_bound + * \see \p upper_bound + * \see \p binary_search + */ +template +thrust::pair +equal_range(ForwardIterator first, + ForwardIterator last, + const LessThanComparable& value); + + +/*! 
\p equal_range is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). The + * value returned by \p equal_range is essentially a combination of + * the values returned by \p lower_bound and \p upper_bound: it returns + * a \p pair of iterators \c i and \c j such that \c i is the first + * position where value could be inserted without violating the + * ordering and \c j is the last position where value could be inserted + * without violating the ordering. It follows that every element in the + * range [i, j) is equivalent to value, and that + * [i, j) is the largest subrange of [first, last) that + * has this property. + * + * This version of \p equal_range returns a \p pair of iterators + * [i, j). \c i is the furthermost iterator in + * [first, last) such that, for every iterator \c k in + * [first, i), comp(*k, value) is \c true. + * \c j is the furthermost iterator in [first, last) such + * that, for every iterator \c k in [first, last), + * comp(value, *k) is \c false. For every iterator \c k + * in [i, j), neither comp(value, *k) nor + * comp(*k, value) is \c true. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return A \p pair of iterators [i, j) that define the range of equivalent elements. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. 
+ * + * The following code snippet demonstrates how to use \p equal_range + * to search for values in a ordered range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::equal_range(thrust::device, input.begin(), input.end(), 0, thrust::less()); // returns [input.begin(), input.begin() + 1) + * thrust::equal_range(thrust::device, input.begin(), input.end(), 1, thrust::less()); // returns [input.begin() + 1, input.begin() + 1) + * thrust::equal_range(thrust::device, input.begin(), input.end(), 2, thrust::less()); // returns [input.begin() + 1, input.begin() + 2) + * thrust::equal_range(thrust::device, input.begin(), input.end(), 3, thrust::less()); // returns [input.begin() + 2, input.begin() + 2) + * thrust::equal_range(thrust::device, input.begin(), input.end(), 8, thrust::less()); // returns [input.begin() + 4, input.end) + * thrust::equal_range(thrust::device, input.begin(), input.end(), 9, thrust::less()); // returns [input.end(), input.end) + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/equal_range + * \see \p lower_bound + * \see \p upper_bound + * \see \p binary_search + */ +template +__host__ __device__ +thrust::pair +equal_range(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const T& value, + StrictWeakOrdering comp); + + +/*! \p equal_range is a version of binary search: it attempts to find + * the element value in an ordered range [first, last). 
The + * value returned by \p equal_range is essentially a combination of + * the values returned by \p lower_bound and \p upper_bound: it returns + * a \p pair of iterators \c i and \c j such that \c i is the first + * position where value could be inserted without violating the + * ordering and \c j is the last position where value could be inserted + * without violating the ordering. It follows that every element in the + * range [i, j) is equivalent to value, and that + * [i, j) is the largest subrange of [first, last) that + * has this property. + * + * This version of \p equal_range returns a \p pair of iterators + * [i, j). \c i is the furthermost iterator in + * [first, last) such that, for every iterator \c k in + * [first, i), comp(*k, value) is \c true. + * \c j is the furthermost iterator in [first, last) such + * that, for every iterator \c k in [first, last), + * comp(value, *k) is \c false. For every iterator \c k + * in [i, j), neither comp(value, *k) nor + * comp(*k, value) is \c true. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param value The value to be searched. + * \param comp The comparison operator. + * \return A \p pair of iterators [i, j) that define the range of equivalent elements. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam T is comparable to \p ForwardIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * The following code snippet demonstrates how to use \p equal_range + * to search for values in a ordered range. + * + * \code + * #include + * #include + * #include + * ... 
+ * thrust::device_vector<int> input(5);
+ *
+ * input[0] = 0;
+ * input[1] = 2;
+ * input[2] = 5;
+ * input[3] = 7;
+ * input[4] = 8;
+ *
+ * thrust::equal_range(input.begin(), input.end(), 0, thrust::less<int>()); // returns [input.begin(), input.begin() + 1)
+ * thrust::equal_range(input.begin(), input.end(), 1, thrust::less<int>()); // returns [input.begin() + 1, input.begin() + 1)
+ * thrust::equal_range(input.begin(), input.end(), 2, thrust::less<int>()); // returns [input.begin() + 1, input.begin() + 2)
+ * thrust::equal_range(input.begin(), input.end(), 3, thrust::less<int>()); // returns [input.begin() + 2, input.begin() + 2)
+ * thrust::equal_range(input.begin(), input.end(), 8, thrust::less<int>()); // returns [input.begin() + 4, input.end)
+ * thrust::equal_range(input.begin(), input.end(), 9, thrust::less<int>()); // returns [input.end(), input.end)
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/equal_range
+ * \see \p lower_bound
+ * \see \p upper_bound
+ * \see \p binary_search
+ */
+template <typename ForwardIterator, typename T, typename StrictWeakOrdering>
+thrust::pair<ForwardIterator, ForwardIterator>
+equal_range(ForwardIterator first,
+            ForwardIterator last,
+            const T& value,
+            StrictWeakOrdering comp);
+
+
+/*! \addtogroup vectorized_binary_search Vectorized Searches
+ * \ingroup binary_search
+ * \{
+ */
+
+
+//////////////////////
+// Vector Functions //
+//////////////////////
+
+
+/*! \p lower_bound is a vectorized version of binary search: for each
+ * iterator \c v in [values_first, values_last) it attempts to
+ * find the value *v in an ordered range [first, last).
+ * Specifically, it returns the index of first position where value could
+ * be inserted without violating the ordering.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the ordered sequence.
+ * \param last The end of the ordered sequence.
+ * \param values_first The beginning of the search values sequence.
+ * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is LessThanComparable. + * \tparam OutputIterator is a model of Output Iterator. + * and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p lower_bound + * to search for multiple values in a ordered range using the \p thrust::device execution policy for + * parallelization: + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::lower_bound(thrust::device, + * input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin()); + * + * // output is now [0, 1, 1, 2, 4, 5] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +OutputIterator lower_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result); + + +/*! \p lower_bound is a vectorized version of binary search: for each + * iterator \c v in [values_first, values_last) it attempts to + * find the value *v in an ordered range [first, last). 
+ * Specifically, it returns the index of first position where value could + * be inserted without violating the ordering. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param values_first The beginning of the search values sequence. + * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is LessThanComparable. + * \tparam OutputIterator is a model of Output Iterator. + * and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p lower_bound + * to search for multiple values in a ordered range. + * + * \code + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::lower_bound(input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin()); + * + * // output is now [0, 1, 1, 2, 4, 5] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +OutputIterator lower_bound(ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result); + + +/*! 
\p lower_bound is a vectorized version of binary search: for each + * iterator \c v in [values_first, values_last) it attempts to + * find the value *v in an ordered range [first, last). + * Specifically, it returns the index of first position where value could + * be inserted without violating the ordering. This version of + * \p lower_bound uses function object \c comp for comparison. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param values_first The beginning of the search values sequence. + * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * \param comp The comparison operator. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator. + * and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p lower_bound + * to search for multiple values in a ordered range. + * + * \code + * #include + * #include + * #include + * #include + * ... 
+ * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::lower_bound(input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin(), + * thrust::less()); + * + * // output is now [0, 1, 1, 2, 4, 5] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +OutputIterator lower_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result, + StrictWeakOrdering comp); + + +/*! \p lower_bound is a vectorized version of binary search: for each + * iterator \c v in [values_first, values_last) it attempts to + * find the value *v in an ordered range [first, last). + * Specifically, it returns the index of first position where value could + * be inserted without violating the ordering. This version of + * \p lower_bound uses function object \c comp for comparison. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param values_first The beginning of the search values sequence. + * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * \param comp The comparison operator. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator. 
+ * and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p lower_bound + * to search for multiple values in a ordered range. + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::lower_bound(input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin(), + * thrust::less()); + * + * // output is now [0, 1, 1, 2, 4, 5] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/lower_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +OutputIterator lower_bound(ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result, + StrictWeakOrdering comp); + + +/*! \p upper_bound is a vectorized version of binary search: for each + * iterator \c v in [values_first, values_last) it attempts to + * find the value *v in an ordered range [first, last). + * Specifically, it returns the index of last position where value could + * be inserted without violating the ordering. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param values_first The beginning of the search values sequence. 
+ * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is LessThanComparable. + * \tparam OutputIterator is a model of Output Iterator. + * and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p upper_bound + * to search for multiple values in a ordered range using the \p thrust::device execution policy for + * parallelization: + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::upper_bound(thrust::device, + * input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin()); + * + * // output is now [1, 1, 2, 2, 5, 5] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +OutputIterator upper_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result); + + +/*! \p upper_bound is a vectorized version of binary search: for each + * iterator \c v in [values_first, values_last) it attempts to + * find the value *v in an ordered range [first, last). 
+ * Specifically, it returns the index of last position where value could + * be inserted without violating the ordering. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param values_first The beginning of the search values sequence. + * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is LessThanComparable. + * \tparam OutputIterator is a model of Output Iterator. + * and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p upper_bound + * to search for multiple values in a ordered range. + * + * \code + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::upper_bound(input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin()); + * + * // output is now [1, 1, 2, 2, 5, 5] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p upper_bound + * \see \p equal_range + * \see \p binary_search + */ +template +OutputIterator upper_bound(ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result); + + +/*! 
\p upper_bound is a vectorized version of binary search: for each + * iterator \c v in [values_first, values_last) it attempts to + * find the value *v in an ordered range [first, last). + * Specifically, it returns the index of first position where value could + * be inserted without violating the ordering. This version of + * \p upper_bound uses function object \c comp for comparison. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param values_first The beginning of the search values sequence. + * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * \param comp The comparison operator. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator. + * and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p upper_bound + * to search for multiple values in a ordered range using the \p thrust::device execution policy for + * parallelization: + * + * \code + * #include + * #include + * #include + * #include + * ... 
+ * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::upper_bound(thrust::device, + * input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin(), + * thrust::less()); + * + * // output is now [1, 1, 2, 2, 5, 5] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p lower_bound + * \see \p equal_range + * \see \p binary_search + */ +template +__host__ __device__ +OutputIterator upper_bound(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result, + StrictWeakOrdering comp); + + +/*! \p upper_bound is a vectorized version of binary search: for each + * iterator \c v in [values_first, values_last) it attempts to + * find the value *v in an ordered range [first, last). + * Specifically, it returns the index of first position where value could + * be inserted without violating the ordering. This version of + * \p upper_bound uses function object \c comp for comparison. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param values_first The beginning of the search values sequence. + * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * \param comp The comparison operator. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator. 
+ * and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p upper_bound + * to search for multiple values in a ordered range. + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::upper_bound(input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin(), + * thrust::less()); + * + * // output is now [1, 1, 2, 2, 5, 5] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/upper_bound + * \see \p lower_bound + * \see \p equal_range + * \see \p binary_search + */ +template +OutputIterator upper_bound(ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result, + StrictWeakOrdering comp); + + +/*! \p binary_search is a vectorized version of binary search: for each + * iterator \c v in [values_first, values_last) it attempts to + * find the value *v in an ordered range [first, last). + * It returns \c true if an element that is equivalent to \c value + * is present in [first, last) and \c false if no such element + * exists. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param values_first The beginning of the search values sequence. 
+ * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is LessThanComparable. + * \tparam OutputIterator is a model of Output Iterator. + * and bool is convertible to \c OutputIterator's \c value_type. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p binary_search + * to search for multiple values in a ordered range using the \p thrust::device execution policy for + * parallelization: + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::binary_search(thrust::device, + * input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin()); + * + * // output is now [true, false, true, false, true, false] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/binary_search + * \see \p lower_bound + * \see \p upper_bound + * \see \p equal_range + */ +template +__host__ __device__ +OutputIterator binary_search(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result); + + +/*! \p binary_search is a vectorized version of binary search: for each + * iterator \c v in [values_first, values_last) it attempts to + * find the value *v in an ordered range [first, last). 
+ * It returns \c true if an element that is equivalent to \c value + * is present in [first, last) and \c false if no such element + * exists. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param values_first The beginning of the search values sequence. + * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is LessThanComparable. + * \tparam OutputIterator is a model of Output Iterator. + * and bool is convertible to \c OutputIterator's \c value_type. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p binary_search + * to search for multiple values in a ordered range. + * + * \code + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::binary_search(input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin()); + * + * // output is now [true, false, true, false, true, false] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/binary_search + * \see \p lower_bound + * \see \p upper_bound + * \see \p equal_range + */ +template +OutputIterator binary_search(ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result); + + +/*! 
\p binary_search is a vectorized version of binary search: for each + * iterator \c v in [values_first, values_last) it attempts to + * find the value *v in an ordered range [first, last). + * It returns \c true if an element that is equivalent to \c value + * is present in [first, last) and \c false if no such element + * exists. This version of \p binary_search uses function object + * \c comp for comparison. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param values_first The beginning of the search values sequence. + * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * \param comp The comparison operator. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is LessThanComparable. + * \tparam OutputIterator is a model of Output Iterator. + * and bool is convertible to \c OutputIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p binary_search + * to search for multiple values in a ordered range using the \p thrust::device execution policy for + * parallelization: + * + * \code + * #include + * #include + * #include + * #include + * ... 
+ * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::binary_search(thrust::device, + * input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin(), + * thrust::less()); + * + * // output is now [true, false, true, false, true, false] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/binary_search + * \see \p lower_bound + * \see \p upper_bound + * \see \p equal_range + */ +template +__host__ __device__ +OutputIterator binary_search(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result, + StrictWeakOrdering comp); + + +/*! \p binary_search is a vectorized version of binary search: for each + * iterator \c v in [values_first, values_last) it attempts to + * find the value *v in an ordered range [first, last). + * It returns \c true if an element that is equivalent to \c value + * is present in [first, last) and \c false if no such element + * exists. This version of \p binary_search uses function object + * \c comp for comparison. + * + * \param first The beginning of the ordered sequence. + * \param last The end of the ordered sequence. + * \param values_first The beginning of the search values sequence. + * \param values_last The end of the search values sequence. + * \param result The beginning of the output sequence. + * \param comp The comparison operator. + * + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam InputIterator is a model of Input Iterator. + * and \c InputIterator's \c value_type is LessThanComparable. + * \tparam OutputIterator is a model of Output Iterator. 
+ * and bool is convertible to \c OutputIterator's \c value_type. + * \tparam StrictWeakOrdering is a model of Strict Weak Ordering. + * + * \pre The ranges [first,last) and [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p binary_search + * to search for multiple values in a ordered range. + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector input(5); + * + * input[0] = 0; + * input[1] = 2; + * input[2] = 5; + * input[3] = 7; + * input[4] = 8; + * + * thrust::device_vector values(6); + * values[0] = 0; + * values[1] = 1; + * values[2] = 2; + * values[3] = 3; + * values[4] = 8; + * values[5] = 9; + * + * thrust::device_vector output(6); + * + * thrust::binary_search(input.begin(), input.end(), + * values.begin(), values.end(), + * output.begin(), + * thrust::less()); + * + * // output is now [true, false, true, false, true, false] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/binary_search + * \see \p lower_bound + * \see \p upper_bound + * \see \p equal_range + */ +template +OutputIterator binary_search(ForwardIterator first, + ForwardIterator last, + InputIterator values_first, + InputIterator values_last, + OutputIterator result, + StrictWeakOrdering comp); + + +/*! \} // end vectorized_binary_search + */ + + +/*! \} // end binary_search + */ + + +/*! 
\} // end searching + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/complex.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/complex.h new file mode 100644 index 0000000000000000000000000000000000000000..8c0be0d61d96353e2021d23af17775015af533b2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/complex.h @@ -0,0 +1,1047 @@ +/* + * Copyright 2008-2019 NVIDIA Corporation + * Copyright 2013 Filipe RNC Maia + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! 
\file complex.h + * \brief Complex numbers + */ + +#pragma once + +#include + +#include +#include +#include +#include + +#if THRUST_CPP_DIALECT >= 2011 +# define THRUST_STD_COMPLEX_REAL(z) \ + reinterpret_cast< \ + const typename thrust::detail::remove_reference::type::value_type (&)[2] \ + >(z)[0] +# define THRUST_STD_COMPLEX_IMAG(z) \ + reinterpret_cast< \ + const typename thrust::detail::remove_reference::type::value_type (&)[2] \ + >(z)[1] +# define THRUST_STD_COMPLEX_DEVICE __device__ +#else +# define THRUST_STD_COMPLEX_REAL(z) (z).real() +# define THRUST_STD_COMPLEX_IMAG(z) (z).imag() +# define THRUST_STD_COMPLEX_DEVICE +#endif + +THRUST_NAMESPACE_BEGIN + +/* + * Calls to the standard math library from inside the thrust namespace + * with real arguments require explicit scope otherwise they will fail + * to resolve as it will find the equivalent complex function but then + * fail to match the template, and give up looking for other scopes. + */ + + +/*! \addtogroup numerics + * \{ + */ + +/*! \addtogroup complex_numbers Complex Numbers + * \{ + */ + +/*! \cond + */ + +namespace detail +{ + +template +struct complex_storage; + +#if THRUST_CPP_DIALECT >= 2011 \ + && (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) \ + && (THRUST_GCC_VERSION >= 40800) + // C++11 implementation, excluding GCC 4.7, which doesn't have `alignas`. + template + struct complex_storage + { + struct alignas(Align) type { T x; T y; }; + }; +#elif (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) \ + || ( (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) \ + && (THRUST_GCC_VERSION < 40600)) + // C++03 implementation for MSVC and GCC <= 4.5. + // + // We have to implement `aligned_type` with specializations for MSVC + // and GCC 4.2 and older because they require literals as arguments to + // their alignment attribute. + + #if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) + // MSVC implementation. 
+ #define THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(X) \ + template \ + struct complex_storage \ + { \ + __declspec(align(X)) struct type { T x; T y; }; \ + }; \ + /**/ + #else + // GCC <= 4.2 implementation. + #define THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(X) \ + template \ + struct complex_storage \ + { \ + struct type { T x; T y; } __attribute__((aligned(X))); \ + }; \ + /**/ + #endif + + // The primary template is a fallback, which doesn't specify any alignment. + // It's only used when T is very large and we're using an older compilers + // which we have to fully specialize each alignment case. + template + struct complex_storage + { + T x; T y; + }; + + THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(1); + THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(2); + THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(4); + THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(8); + THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(16); + THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(32); + THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(64); + THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(128); + + #undef THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION +#else + // C++03 implementation for GCC > 4.5, Clang, PGI, ICPC, and xlC. + template + struct complex_storage + { + struct type { T x; T y; } __attribute__((aligned(Align))); + }; +#endif + +} // end namespace detail + +/*! \endcond + */ + +/*! \p complex is the Thrust equivalent to std::complex. It is + * functionally identical to it, but can also be used in device code which + * std::complex currently cannot. + * + * \tparam T The type used to hold the real and imaginary parts. Should be + * float or double. Others types are not supported. + * + */ +template +struct complex +{ +public: + + /*! \p value_type is the type of \p complex's real and imaginary parts. + */ + typedef T value_type; + + + + /* --- Constructors --- */ + + /*! Construct a complex number with an imaginary part of 0. + * + * \param re The real part of the number. 
+ */ + __host__ __device__ + complex(const T& re); + + /*! Construct a complex number from its real and imaginary parts. + * + * \param re The real part of the number. + * \param im The imaginary part of the number. + */ + __host__ __device__ + complex(const T& re, const T& im); + +#if THRUST_CPP_DIALECT >= 2011 + /*! Default construct a complex number. + */ + complex() = default; + + /*! This copy constructor copies from a \p complex with a type that is + * convertible to this \p complex's \c value_type. + * + * \param z The \p complex to copy from. + */ + complex(const complex& z) = default; +#else + /*! Default construct a complex number. + */ + __host__ __device__ + complex(); + + /*! This copy constructor copies from a \p complex with a type that is + * convertible to this \p complex's \c value_type. + * + * \param z The \p complex to copy from. + */ + __host__ __device__ + complex(const complex& z); +#endif + + /*! This converting copy constructor copies from a \p complex with a type + * that is convertible to this \p complex's \c value_type. + * + * \param z The \p complex to copy from. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ __device__ + complex(const complex& z); + + /*! This converting copy constructor copies from a std::complex with + * a type that is convertible to this \p complex's \c value_type. + * + * \param z The \p complex to copy from. + */ + __host__ THRUST_STD_COMPLEX_DEVICE + complex(const std::complex& z); + + /*! This converting copy constructor copies from a std::complex with + * a type that is convertible to this \p complex's \c value_type. + * + * \param z The \p complex to copy from. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ THRUST_STD_COMPLEX_DEVICE + complex(const std::complex& z); + + + + /* --- Assignment Operators --- */ + + /*! Assign `re` to the real part of this \p complex and set the imaginary part + * to 0. + * + * \param re The real part of the number. 
+ */ + __host__ __device__ + complex& operator=(const T& re); + +#if THRUST_CPP_DIALECT >= 2011 + /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this + * \p complex respectively. + * + * \param z The \p complex to copy from. + */ + complex& operator=(const complex& z) = default; +#else + /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this + * \p complex respectively. + * + * \param z The \p complex to copy from. + */ + __host__ __device__ + complex& operator=(const complex& z); +#endif + + /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this + * \p complex respectively. + * + * \param z The \p complex to copy from. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ __device__ + complex& operator=(const complex& z); + + /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this + * \p complex respectively. + * + * \param z The \p complex to copy from. + */ + __host__ THRUST_STD_COMPLEX_DEVICE + complex& operator=(const std::complex& z); + + /*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this + * \p complex respectively. + * + * \param z The \p complex to copy from. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ THRUST_STD_COMPLEX_DEVICE + complex& operator=(const std::complex& z); + + + /* --- Compound Assignment Operators --- */ + + /*! Adds a \p complex to this \p complex and assigns the result to this + * \p complex. + * + * \param z The \p complex to be added. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ __device__ + complex& operator+=(const complex& z); + + /*! Subtracts a \p complex from this \p complex and assigns the result to + * this \p complex. + * + * \param z The \p complex to be subtracted. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ __device__ + complex& operator-=(const complex& z); + + /*! 
Multiplies this \p complex by another \p complex and assigns the result + * to this \p complex. + * + * \param z The \p complex to be multiplied. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ __device__ + complex& operator*=(const complex& z); + + /*! Divides this \p complex by another \p complex and assigns the result to + * this \p complex. + * + * \param z The \p complex to be divided. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ __device__ + complex& operator/=(const complex& z); + + /*! Adds a scalar to this \p complex and assigns the result to this + * \p complex. + * + * \param z The \p complex to be added. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ __device__ + complex& operator+=(const U& z); + + /*! Subtracts a scalar from this \p complex and assigns the result to + * this \p complex. + * + * \param z The scalar to be subtracted. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ __device__ + complex& operator-=(const U& z); + + /*! Multiplies this \p complex by a scalar and assigns the result + * to this \p complex. + * + * \param z The scalar to be multiplied. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ __device__ + complex& operator*=(const U& z); + + /*! Divides this \p complex by a scalar and assigns the result to + * this \p complex. + * + * \param z The scalar to be divided. + * + * \tparam U is convertible to \c value_type. + */ + template + __host__ __device__ + complex& operator/=(const U& z); + + + + /* --- Getter functions --- + * The volatile ones are there to help for example + * with certain reductions optimizations + */ + + /*! Returns the real part of this \p complex. + */ + __host__ __device__ + T real() const volatile { return data.x; } + + /*! Returns the imaginary part of this \p complex. + */ + __host__ __device__ + T imag() const volatile { return data.y; } + + /*! 
Returns the real part of this \p complex. + */ + __host__ __device__ + T real() const { return data.x; } + + /*! Returns the imaginary part of this \p complex. + */ + __host__ __device__ + T imag() const { return data.y; } + + + + /* --- Setter functions --- + * The volatile ones are there to help for example + * with certain reductions optimizations + */ + + /*! Sets the real part of this \p complex. + * + * \param re The new real part of this \p complex. + */ + __host__ __device__ + void real(T re) volatile { data.x = re; } + + /*! Sets the imaginary part of this \p complex. + * + * \param im The new imaginary part of this \p complex.e + */ + __host__ __device__ + void imag(T im) volatile { data.y = im; } + + /*! Sets the real part of this \p complex. + * + * \param re The new real part of this \p complex. + */ + __host__ __device__ + void real(T re) { data.x = re; } + + /*! Sets the imaginary part of this \p complex. + * + * \param im The new imaginary part of this \p complex. + */ + __host__ __device__ + void imag(T im) { data.y = im; } + + + + /* --- Casting functions --- */ + + /*! Casts this \p complex to a std::complex of the same type. + */ + __host__ + operator std::complex() const { return std::complex(real(), imag()); } + +private: + typename detail::complex_storage::type data; +}; + + +/* --- General Functions --- */ + +/*! Returns the magnitude (also known as absolute value) of a \p complex. + * + * \param z The \p complex from which to calculate the absolute value. + */ +template +__host__ __device__ +T abs(const complex& z); + +/*! Returns the phase angle (also known as argument) in radians of a \p complex. + * + * \param z The \p complex from which to calculate the phase angle. + */ +template +__host__ __device__ +T arg(const complex& z); + +/*! Returns the square of the magnitude of a \p complex. + * + * \param z The \p complex from which to calculate the norm. + */ +template +__host__ __device__ +T norm(const complex& z); + +/*! 
Returns the complex conjugate of a \p complex. + * + * \param z The \p complex from which to calculate the complex conjugate. + */ +template +__host__ __device__ +complex conj(const complex& z); + +/*! Returns a \p complex with the specified magnitude and phase. + * + * \param m The magnitude of the returned \p complex. + * \param theta The phase of the returned \p complex in radians. + */ +template +__host__ __device__ +complex::type> +polar(const T0& m, const T1& theta = T1()); + +/*! Returns the projection of a \p complex on the Riemann sphere. + * For all finite \p complex it returns the argument. For \p complexs + * with a non finite part returns (INFINITY,+/-0) where the sign of + * the zero matches the sign of the imaginary part of the argument. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex proj(const T& z); + + + +/* --- Binary Arithmetic operators --- */ + +/*! Adds two \p complex numbers. + * + * The value types of the two \p complex types should be compatible and the + * type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ __device__ +complex::type> +operator+(const complex& x, const complex& y); + +/*! Adds a scalar to a \p complex number. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The \p complex. + * \param y The scalar. + */ +template +__host__ __device__ +complex::type> +operator+(const complex& x, const T1& y); + +/*! Adds a \p complex number to a scalar. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The scalar. + * \param y The \p complex. 
+ */ +template +__host__ __device__ +complex::type> +operator+(const T0& x, const complex& y); + +/*! Subtracts two \p complex numbers. + * + * The value types of the two \p complex types should be compatible and the + * type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The first \p complex (minuend). + * \param y The second \p complex (subtrahend). + */ +template +__host__ __device__ +complex::type> +operator-(const complex& x, const complex& y); + +/*! Subtracts a scalar from a \p complex number. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The \p complex (minuend). + * \param y The scalar (subtrahend). + */ +template +__host__ __device__ +complex::type> +operator-(const complex& x, const T1& y); + +/*! Subtracts a \p complex number from a scalar. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The scalar (minuend). + * \param y The \p complex (subtrahend). + */ +template +__host__ __device__ +complex::type> +operator-(const T0& x, const complex& y); + +/*! Multiplies two \p complex numbers. + * + * The value types of the two \p complex types should be compatible and the + * type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ __device__ +complex::type> +operator*(const complex& x, const complex& y); + +/*! Multiplies a \p complex number by a scalar. + * + * \param x The \p complex. + * \param y The scalar. + */ +template +__host__ __device__ +complex::type> +operator*(const complex& x, const T1& y); + +/*! Multiplies a scalar by a \p complex number. 
+ * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The scalar. + * \param y The \p complex. + */ +template +__host__ __device__ +complex::type> +operator*(const T0& x, const complex& y); + +/*! Divides two \p complex numbers. + * + * The value types of the two \p complex types should be compatible and the + * type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The numerator (dividend). + * \param y The denomimator (divisor). + */ +template +__host__ __device__ +complex::type> +operator/(const complex& x, const complex& y); + +/*! Divides a \p complex number by a scalar. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The complex numerator (dividend). + * \param y The scalar denomimator (divisor). + */ +template +__host__ __device__ +complex::type> +operator/(const complex& x, const T1& y); + +/*! Divides a scalar by a \p complex number. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The scalar numerator (dividend). + * \param y The complex denomimator (divisor). + */ +template +__host__ __device__ +complex::type> +operator/(const T0& x, const complex& y); + + + +/* --- Unary Arithmetic operators --- */ + +/*! Unary plus, returns its \p complex argument. + * + * \param y The \p complex argument. + */ +template +__host__ __device__ +complex +operator+(const complex& y); + +/*! Unary minus, returns the additive inverse (negation) of its \p complex + * argument. + * + * \param y The \p complex argument. + */ +template +__host__ __device__ +complex +operator-(const complex& y); + + + +/* --- Exponential Functions --- */ + +/*! 
Returns the complex exponential of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex exp(const complex& z); + +/*! Returns the complex natural logarithm of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex log(const complex& z); + +/*! Returns the complex base 10 logarithm of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex log10(const complex& z); + + + +/* --- Power Functions --- */ + +/*! Returns a \p complex number raised to another. + * + * The value types of the two \p complex types should be compatible and the + * type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The base. + * \param y The exponent. + */ +template +__host__ __device__ +complex::type> +pow(const complex& x, const complex& y); + +/*! Returns a \p complex number raised to a scalar. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The base. + * \param y The exponent. + */ +template +__host__ __device__ +complex::type> +pow(const complex& x, const T1& y); + +/*! Returns a scalar raised to a \p complex number. + * + * The value type of the \p complex should be compatible with the scalar and + * the type of the returned \p complex is the promoted type of the two arguments. + * + * \param x The base. + * \param y The exponent. + */ +template +__host__ __device__ +complex::type> +pow(const T0& x, const complex& y); + +/*! Returns the complex square root of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex sqrt(const complex& z); + + +/* --- Trigonometric Functions --- */ + +/*! Returns the complex cosine of a \p complex number. + * + * \param z The \p complex argument. 
+ */ +template +__host__ __device__ +complex cos(const complex& z); + +/*! Returns the complex sine of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex sin(const complex& z); + +/*! Returns the complex tangent of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex tan(const complex& z); + + + +/* --- Hyperbolic Functions --- */ + +/*! Returns the complex hyperbolic cosine of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex cosh(const complex& z); + +/*! Returns the complex hyperbolic sine of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex sinh(const complex& z); + +/*! Returns the complex hyperbolic tangent of a \p complex number. + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex tanh(const complex& z); + + + +/* --- Inverse Trigonometric Functions --- */ + +/*! Returns the complex arc cosine of a \p complex number. + * + * The range of the real part of the result is [0, Pi] and + * the range of the imaginary part is [-inf, +inf] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex acos(const complex& z); + +/*! Returns the complex arc sine of a \p complex number. + * + * The range of the real part of the result is [-Pi/2, Pi/2] and + * the range of the imaginary part is [-inf, +inf] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex asin(const complex& z); + +/*! Returns the complex arc tangent of a \p complex number. + * + * The range of the real part of the result is [-Pi/2, Pi/2] and + * the range of the imaginary part is [-inf, +inf] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex atan(const complex& z); + + + +/* --- Inverse Hyperbolic Functions --- */ + +/*! 
Returns the complex inverse hyperbolic cosine of a \p complex number. + * + * The range of the real part of the result is [0, +inf] and + * the range of the imaginary part is [-Pi, Pi] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex acosh(const complex& z); + +/*! Returns the complex inverse hyperbolic sine of a \p complex number. + * + * The range of the real part of the result is [-inf, +inf] and + * the range of the imaginary part is [-Pi/2, Pi/2] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex asinh(const complex& z); + +/*! Returns the complex inverse hyperbolic tangent of a \p complex number. + * + * The range of the real part of the result is [-inf, +inf] and + * the range of the imaginary part is [-Pi/2, Pi/2] + * + * \param z The \p complex argument. + */ +template +__host__ __device__ +complex atanh(const complex& z); + + + +/* --- Stream Operators --- */ + +/*! Writes to an output stream a \p complex number in the form (real, imaginary). + * + * \param os The output stream. + * \param z The \p complex number to output. + */ +template +std::basic_ostream& +operator<<(std::basic_ostream& os, const complex& z); + +/*! Reads a \p complex number from an input stream. + * + * The recognized formats are: + * - real + * - (real) + * - (real, imaginary) + * + * The values read must be convertible to the \p complex's \c value_type + * + * \param is The input stream. + * \param z The \p complex number to set. + */ +template +__host__ +std::basic_istream& +operator>>(std::basic_istream& is, complex& z); + + + +/* --- Equality Operators --- */ + +/*! Returns true if two \p complex numbers are equal and false otherwise. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ __device__ +bool operator==(const complex& x, const complex& y); + +/*! Returns true if two \p complex numbers are equal and false otherwise. 
+ * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ THRUST_STD_COMPLEX_DEVICE +bool operator==(const complex& x, const std::complex& y); + +/*! Returns true if two \p complex numbers are equal and false otherwise. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ THRUST_STD_COMPLEX_DEVICE +bool operator==(const std::complex& x, const complex& y); + +/*! Returns true if the imaginary part of the \p complex number is zero and + * the real part is equal to the scalar. Returns false otherwise. + * + * \param x The scalar. + * \param y The \p complex. + */ +template +__host__ __device__ +bool operator==(const T0& x, const complex& y); + +/*! Returns true if the imaginary part of the \p complex number is zero and + * the real part is equal to the scalar. Returns false otherwise. + * + * \param x The \p complex. + * \param y The scalar. + */ +template +__host__ __device__ +bool operator==(const complex& x, const T1& y); + +/*! Returns true if two \p complex numbers are different and false otherwise. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ __device__ +bool operator!=(const complex& x, const complex& y); + +/*! Returns true if two \p complex numbers are different and false otherwise. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ THRUST_STD_COMPLEX_DEVICE +bool operator!=(const complex& x, const std::complex& y); + +/*! Returns true if two \p complex numbers are different and false otherwise. + * + * \param x The first \p complex. + * \param y The second \p complex. + */ +template +__host__ THRUST_STD_COMPLEX_DEVICE +bool operator!=(const std::complex& x, const complex& y); + +/*! Returns true if the imaginary part of the \p complex number is not zero or + * the real part is different from the scalar. Returns false otherwise. + * + * \param x The scalar. 
+ * \param y The \p complex. + */ +template +__host__ __device__ +bool operator!=(const T0& x, const complex& y); + +/*! Returns true if the imaginary part of the \p complex number is not zero or + * the real part is different from the scalar. Returns false otherwise. + * + * \param x The \p complex. + * \param y The scalar. + */ +template +__host__ __device__ +bool operator!=(const complex& x, const T1& y); + +THRUST_NAMESPACE_END + +#include + +#undef THRUST_STD_COMPLEX_REAL +#undef THRUST_STD_COMPLEX_IMAG +#undef THRUST_STD_COMPLEX_DEVICE + +/*! \} // complex_numbers + */ + +/*! \} // numerics + */ + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/count.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/count.h new file mode 100644 index 0000000000000000000000000000000000000000..abf8b2d6c300bc930c2514960e9d421617e02294 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/count.h @@ -0,0 +1,231 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file count.h + * \brief Counting elements in a range + */ + +#pragma once + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup algorithms + */ + +/*! \addtogroup reductions + * \ingroup algorithms + * \{ + */ + +/*! 
\addtogroup counting + * \ingroup reductions + * \{ + */ + + +/*! \p count finds the number of elements in [first,last) that are equal + * to \p value. More precisely, \p count returns the number of iterators \c i in + * [first, last) such that *i == value. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param value The value to be counted. + * \return The number of elements equal to \p value. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be a model of must be a model of Equality Comparable. + * \tparam EqualityComparable must be a model of Equality Comparable and can be compared for equality with \c InputIterator's \c value_type + * + * The following code snippet demonstrates how to use \p count to + * count the number of instances in a range of a value of interest using the \p thrust::device execution policy: + * + * \code + * #include + * #include + * #include + * ... + * // put 3 1s in a device_vector + * thrust::device_vector vec(5,0); + * vec[1] = 1; + * vec[3] = 1; + * vec[4] = 1; + * + * // count the 1s + * int result = thrust::count(thrust::device, vec.begin(), vec.end(), 1); + * // result == 3 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/count + */ +template +__host__ __device__ + typename thrust::iterator_traits::difference_type + count(const thrust::detail::execution_policy_base &exec, InputIterator first, InputIterator last, const EqualityComparable& value); + + + +/*! \p count finds the number of elements in [first,last) that are equal + * to \p value. More precisely, \p count returns the number of iterators \c i in + * [first, last) such that *i == value. + * + * \param first The beginning of the sequence. 
+ * \param last The end of the sequence. + * \param value The value to be counted. + * \return The number of elements equal to \p value. + * + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be a model of must be a model of Equality Comparable. + * \tparam EqualityComparable must be a model of Equality Comparable and can be compared for equality with \c InputIterator's \c value_type + * + * The following code snippet demonstrates how to use \p count to + * count the number of instances in a range of a value of interest. + * \code + * #include + * #include + * ... + * // put 3 1s in a device_vector + * thrust::device_vector vec(5,0); + * vec[1] = 1; + * vec[3] = 1; + * vec[4] = 1; + * + * // count the 1s + * int result = thrust::count(vec.begin(), vec.end(), 1); + * // result == 3 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/count + */ +template + typename thrust::iterator_traits::difference_type + count(InputIterator first, InputIterator last, const EqualityComparable& value); + + +/*! \p count_if finds the number of elements in [first,last) for which + * a predicate is \c true. More precisely, \p count_if returns the number of iterators + * \c i in [first, last) such that pred(*i) == true. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param pred The predicate. + * \return The number of elements where \p pred is \c true. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c Predicate's \c argument_type. + * \tparam Predicate must be a model of Predicate. 
+ * + * The following code snippet demonstrates how to use \p count to + * count the number of odd numbers in a range using the \p thrust::device execution policy: + * + * \code + * #include + * #include + * #include + * ... + * struct is_odd + * { + * __host__ __device__ + * bool operator()(int &x) + * { + * return x & 1; + * } + * }; + * ... + * // fill a device_vector with even & odd numbers + * thrust::device_vector vec(5); + * vec[0] = 0; + * vec[1] = 1; + * vec[2] = 2; + * vec[3] = 3; + * vec[4] = 4; + * + * // count the odd elements in vec + * int result = thrust::count_if(thrust::device, vec.begin(), vec.end(), is_odd()); + * // result == 2 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/count + */ +template +__host__ __device__ + typename thrust::iterator_traits::difference_type + count_if(const thrust::detail::execution_policy_base &exec, InputIterator first, InputIterator last, Predicate pred); + + +/*! \p count_if finds the number of elements in [first,last) for which + * a predicate is \c true. More precisely, \p count_if returns the number of iterators + * \c i in [first, last) such that pred(*i) == true. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param pred The predicate. + * \return The number of elements where \p pred is \c true. + * + * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c Predicate's \c argument_type. + * \tparam Predicate must be a model of Predicate. + * + * The following code snippet demonstrates how to use \p count to + * count the number of odd numbers in a range. + * \code + * #include + * #include + * ... + * struct is_odd + * { + * __host__ __device__ + * bool operator()(int &x) + * { + * return x & 1; + * } + * }; + * ... 
+ * // fill a device_vector with even & odd numbers + * thrust::device_vector vec(5); + * vec[0] = 0; + * vec[1] = 1; + * vec[2] = 2; + * vec[3] = 3; + * vec[4] = 4; + * + * // count the odd elements in vec + * int result = thrust::count_if(vec.begin(), vec.end(), is_odd()); + * // result == 2 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/count + */ +template + typename thrust::iterator_traits::difference_type + count_if(InputIterator first, InputIterator last, Predicate pred); + + +/*! \} // end counting + * \} // end reductions + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/select_system.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/select_system.h new file mode 100644 index 0000000000000000000000000000000000000000..968446162dd2f8f752865f2e6566da1b9fc89634 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/select_system.h @@ -0,0 +1,84 @@ +/* + * Copyright 2008-2018 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include +#include + +#if THRUST_CPP_DIALECT >= 2011 + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +namespace detail +{ + +// We need a way to compute the return type of `select_system`, which is found +// by using `thrust::system::detail::generic::select_system` and then making an +// ADL call. We have no trait that defines the return type. With the +// limitations of C++11 return type deduction, we need to be able to stick all +// of that into `decltype`. So, we put the using statement into a detail +// namespace, and then implement the generic dispatch function in that +// namespace. + +namespace select_system_detail +{ + +using thrust::system::detail::generic::select_system; + +struct select_system_fn final +{ + __thrust_exec_check_disable__ + template + __host__ __device__ + auto operator()( + thrust::detail::execution_policy_base const& exec0 + ) const + THRUST_DECLTYPE_RETURNS( + select_system( + thrust::detail::derived_cast(thrust::detail::strip_const(exec0)) + ) + ) + + __thrust_exec_check_disable__ + template + __host__ __device__ + auto operator()( + thrust::detail::execution_policy_base const& exec0 + , thrust::detail::execution_policy_base const& exec1 + ) const + THRUST_DECLTYPE_RETURNS( + select_system( + thrust::detail::derived_cast(thrust::detail::strip_const(exec0)) + , thrust::detail::derived_cast(thrust::detail::strip_const(exec1)) + ) + ) +}; + +} // namespace select_system_detail + +THRUST_INLINE_CONSTANT select_system_detail::select_system_fn select_system{}; + +} // detail + +THRUST_NAMESPACE_END + +#endif // THRUST_CPP_DIALECT >= 2011 + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/tuple_meta_transform.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/tuple_meta_transform.h new file mode 100644 index 
0000000000000000000000000000000000000000..285cae8b4c727ad550c155e52a62bbe05c57ff81 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/tuple_meta_transform.h @@ -0,0 +1,58 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#include +#include + +THRUST_NAMESPACE_BEGIN + +namespace detail +{ + +// introduce an intermediate type tuple_meta_transform_WAR_NVCC +// rather than directly specializing tuple_meta_transform with +// default argument IndexSequence = thrust::make_index_sequence::value> +// to workaround nvcc 11.0 compiler bug +template class UnaryMetaFunction, + typename IndexSequence> + struct tuple_meta_transform_WAR_NVCC; + +template class UnaryMetaFunction, + size_t... Is> + struct tuple_meta_transform_WAR_NVCC> +{ + typedef thrust::tuple< + typename UnaryMetaFunction::type>::type... 
+ > type; +}; + +template class UnaryMetaFunction> + struct tuple_meta_transform +{ + typedef typename tuple_meta_transform_WAR_NVCC::value>>::type type; +}; + +} // end detail + +THRUST_NAMESPACE_END + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_free.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_free.h new file mode 100644 index 0000000000000000000000000000000000000000..1cd305045e425ebccd17e98b4f1e496fe5652c60 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_free.h @@ -0,0 +1,65 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief Deallocates storage allocated by \p device_malloc. + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup memory_management Memory Management + * \{ + */ + +/*! \p device_free deallocates memory allocated by the function \p device_malloc. + * + * \param ptr A \p device_ptr pointing to memory to be deallocated. + * + * The following code snippet demonstrates how to use \p device_free to + * deallocate memory allocated by \p device_malloc. + * + * \code + * #include + * #include + * ... 
+ * // allocate some integers with device_malloc + * const int N = 100; + * thrust::device_ptr int_array = thrust::device_malloc(N); + * + * // manipulate integers + * ... + * + * // deallocate with device_free + * thrust::device_free(int_array); + * \endcode + * + * \see device_ptr + * \see device_malloc + */ +inline void device_free(thrust::device_ptr ptr); + +/*! \} // memory_management + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_make_unique.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_make_unique.h new file mode 100644 index 0000000000000000000000000000000000000000..cdb8c31d82ff987b591f305db450d341958615cb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_make_unique.h @@ -0,0 +1,60 @@ +/* + * Copyright 2008-2018 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file device_make_unique.h + * \brief A factory function for creating `unique_ptr`s to device objects. 
+ */ + +#pragma once + +#include +#include + +#if THRUST_CPP_DIALECT >= 2011 + +#include +#include +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// + +template +__host__ +auto device_make_unique(Args&&... args) + THRUST_TRAILING_RETURN(decltype( + uninitialized_allocate_unique(device_allocator{}) + )) +{ +#if !defined(THRUST_DOXYGEN) // This causes Doxygen to choke for some reason. + // FIXME: This is crude - we construct an unnecessary T on the host for + // `device_new`. We need a proper dispatched `construct` algorithm to + // do this properly. + auto p = uninitialized_allocate_unique(device_allocator{}); + device_new(p.get(), T(THRUST_FWD(args)...)); + return p; +#endif +} + +/////////////////////////////////////////////////////////////////////////////// + +THRUST_NAMESPACE_END + +#endif // THRUST_CPP_DIALECT >= 2011 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_malloc.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_malloc.h new file mode 100644 index 0000000000000000000000000000000000000000..790ddbac3a7d809e49934f7001a20babaa7e717d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_malloc.h @@ -0,0 +1,100 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief Allocates storage in device memory. + */ + +#pragma once + +#include +#include +#include // for std::size_t + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup memory_management Memory Management + * \{ + */ + +/*! This version of \p device_malloc allocates sequential device storage + * for bytes. + * + * \param n The number of bytes to allocate sequentially + * in device memory. + * \return A \p device_ptr to the newly allocated memory. + * + * The following code snippet demonstrates how to use \p device_malloc to + * allocate a range of device memory. + * + * \code + * #include + * #include + * ... + * // allocate some memory with device_malloc + * const int N = 100; + * thrust::device_ptr void_ptr = thrust::device_malloc(N); + * + * // manipulate memory + * ... + * + * // deallocate with device_free + * thrust::device_free(void_ptr); + * \endcode + * + * \see device_ptr + * \see device_free + */ +inline thrust::device_ptr device_malloc(const std::size_t n); + +/*! This version of \p device_malloc allocates sequential device storage for + * new objects of the given type. + * + * \param n The number of objects of type T to allocate + * sequentially in device memory. + * \return A \p device_ptr to the newly allocated memory. + * + * The following code snippet demonstrates how to use \p device_malloc to + * allocate a range of device memory. + * + * \code + * #include + * #include + * ... + * // allocate some integers with device_malloc + * const int N = 100; + * thrust::device_ptr int_array = thrust::device_malloc(N); + * + * // manipulate integers + * ... + * + * // deallocate with device_free + * thrust::device_free(int_array); + * \endcode + * + * \see device_ptr + * \see device_free + */ +template + inline thrust::device_ptr device_malloc(const std::size_t n); + +/*! 
\} // memory_management + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_malloc_allocator.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_malloc_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..1b15045f2f8b2d7cda3c39b666cd94aa400abff7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_malloc_allocator.h @@ -0,0 +1,180 @@ +/* + * Copyright 2008-2018 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief An allocator which allocates storage with \p device_malloc. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +// forward declarations to WAR circular #includes +template class device_ptr; +template device_ptr device_malloc(const std::size_t n); + +/*! \addtogroup allocators Allocators + * \ingroup memory_management + * \{ + */ + +/*! \p device_malloc_allocator is a device memory allocator that employs the + * \p device_malloc function for allocation. + * + * \p device_malloc_allocator is deprecated in favor of thrust::mr + * memory resource-based allocators. 
+ * + * \see device_malloc + * \see device_ptr + * \see device_allocator + * \see https://en.cppreference.com/w/cpp/memory/allocator + */ +template + class device_malloc_allocator +{ + public: + /*! Type of element allocated, \c T. */ + typedef T value_type; + + /*! Pointer to allocation, \c device_ptr. */ + typedef device_ptr pointer; + + /*! \c const pointer to allocation, \c device_ptr. */ + typedef device_ptr const_pointer; + + /*! Reference to allocated element, \c device_reference. */ + typedef device_reference reference; + + /*! \c const reference to allocated element, \c device_reference. */ + typedef device_reference const_reference; + + /*! Type of allocation size, \c std::size_t. */ + typedef std::size_t size_type; + + /*! Type of allocation difference, \c pointer::difference_type. */ + typedef typename pointer::difference_type difference_type; + + /*! The \p rebind metafunction provides the type of a \p device_malloc_allocator + * instantiated with another type. + * + * \tparam U The other type to use for instantiation. + */ + template + struct rebind + { + /*! The typedef \p other gives the type of the rebound \p device_malloc_allocator. + */ + typedef device_malloc_allocator other; + }; // end rebind + + /*! No-argument constructor has no effect. */ + __host__ __device__ + inline device_malloc_allocator() {} + + /*! No-argument destructor has no effect. */ + __host__ __device__ + inline ~device_malloc_allocator() {} + + /*! Copy constructor has no effect. */ + __host__ __device__ + inline device_malloc_allocator(device_malloc_allocator const&) {} + + /*! Constructor from other \p device_malloc_allocator has no effect. */ + template + __host__ __device__ + inline device_malloc_allocator(device_malloc_allocator const&) {} + +#if THRUST_CPP_DIALECT >= 2011 + device_malloc_allocator & operator=(const device_malloc_allocator &) = default; +#endif + + /*! Returns the address of an allocated object. + * \return &r. 
+ */ + __host__ __device__ + inline pointer address(reference r) { return &r; } + + /*! Returns the address an allocated object. + * \return &r. + */ + __host__ __device__ + inline const_pointer address(const_reference r) { return &r; } + + /*! Allocates storage for \p cnt objects. + * \param cnt The number of objects to allocate. + * \return A \p pointer to uninitialized storage for \p cnt objects. + * \note Memory allocated by this function must be deallocated with \p deallocate. + */ + __host__ + inline pointer allocate(size_type cnt, + const_pointer = const_pointer(static_cast(0))) + { + if(cnt > this->max_size()) + { + throw std::bad_alloc(); + } // end if + + return pointer(device_malloc(cnt)); + } // end allocate() + + /*! Deallocates storage for objects allocated with \p allocate. + * \param p A \p pointer to the storage to deallocate. + * \param cnt The size of the previous allocation. + * \note Memory deallocated by this function must previously have been + * allocated with \p allocate. + */ + __host__ + inline void deallocate(pointer p, size_type cnt) + { + // silence unused parameter warning while still leaving the parameter name for Doxygen + (void)(cnt); + + device_free(p); + } // end deallocate() + + /*! Returns the largest value \c n for which allocate(n) might succeed. + * \return The largest value \c n for which allocate(n) might succeed. + */ + inline size_type max_size() const + { + return (std::numeric_limits::max)() / sizeof(T); + } // end max_size() + + /*! Compares against another \p device_malloc_allocator for equality. + * \return \c true + */ + __host__ __device__ + inline bool operator==(device_malloc_allocator const&) const { return true; } + + /*! Compares against another \p device_malloc_allocator for inequality. + * \return \c false + */ + __host__ __device__ + inline bool operator!=(device_malloc_allocator const &a) const {return !operator==(a); } +}; // end device_malloc_allocator + +/*! 
\} // allocators + */ + +THRUST_NAMESPACE_END diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_new.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_new.h new file mode 100644 index 0000000000000000000000000000000000000000..c615e58f2d6ac548866cff0f1a8f0365c5e297b6 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_new.h @@ -0,0 +1,86 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file device_new.h + * \brief Constructs new elements in device memory + */ + +#pragma once + +#include + +// #include this for size_t +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! + * \addtogroup memory_management Memory Management + * \{ + */ + +/*! \p device_new implements the placement \c new operator for types + * resident in device memory. \p device_new calls T's null + * constructor on a array of objects in device memory. + * No memory is allocated by this function. + * + * \param p A \p device_ptr to a region of device memory into which + * to construct one or many Ts. + * \param n The number of objects to construct at \p p. + * \return p, casted to T's type. + * + * \see device_ptr + */ +template + device_ptr device_new(device_ptr p, + const size_t n = 1); + +/*! 
\p device_new implements the placement new operator for types + * resident in device memory. \p device_new calls T's copy + * constructor on a array of objects in device memory. No memory is + * allocated by this function. + * + * \param p A \p device_ptr to a region of device memory into which to + * construct one or many Ts. + * \param exemplar The value from which to copy. + * \param n The number of objects to construct at \p p. + * \return p, casted to T's type. + * + * \see device_ptr + * \see fill + */ +template + device_ptr device_new(device_ptr p, + const T &exemplar, + const size_t n = 1); + +/*! \p device_new implements the new operator for types resident in device memory. + * It allocates device memory large enough to hold \p n new objects of type \c T. + * + * \param n The number of objects to allocate. Defaults to \c 1. + * \return A \p device_ptr to the newly allocated region of device memory. + */ +template + device_ptr device_new(const size_t n = 1); + +/*! \} // memory_management + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_new_allocator.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_new_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..c9c6b0e9507fd42efe203ff770eaa048339700be --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_new_allocator.h @@ -0,0 +1,172 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief An allocator which allocates storage with \p device_new. + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup allocators Allocators + * \ingroup memory_management + * \{ + */ + +/*! \p device_new_allocator is a device memory allocator that employs the + * \p device_new function for allocation. + * + * \see device_new + * \see device_ptr + * \see https://en.cppreference.com/w/cpp/memory/allocator + */ +template + class device_new_allocator +{ + public: + /*! Type of element allocated, \c T. */ + typedef T value_type; + + /*! Pointer to allocation, \c device_ptr. */ + typedef device_ptr pointer; + + /*! \c const pointer to allocation, \c device_ptr. */ + typedef device_ptr const_pointer; + + /*! Reference to allocated element, \c device_reference. */ + typedef device_reference reference; + + /*! \c const reference to allocated element, \c device_reference. */ + typedef device_reference const_reference; + + /*! Type of allocation size, \c ::cuda::std::size_t. */ + typedef ::cuda::std::size_t size_type; + + /*! Type of allocation difference, \c pointer::difference_type. */ + typedef typename pointer::difference_type difference_type; + + /*! The \p rebind metafunction provides the type of a \p device_new_allocator + * instantiated with another type. + * + * \tparam U The other type to use for instantiation. + */ + template + struct rebind + { + /*! The typedef \p other gives the type of the rebound \p device_new_allocator. 
+ */ + typedef device_new_allocator other; + }; // end rebind + + /*! No-argument constructor has no effect. */ + __host__ __device__ + inline device_new_allocator() {} + + /*! No-argument destructor has no effect. */ + __host__ __device__ + inline ~device_new_allocator() {} + + /*! Copy constructor has no effect. */ + __host__ __device__ + inline device_new_allocator(device_new_allocator const&) {} + + /*! Constructor from other \p device_malloc_allocator has no effect. */ + template + __host__ __device__ + inline device_new_allocator(device_new_allocator const&) {} + + /*! Returns the address of an allocated object. + * \return &r. + */ + __host__ __device__ + inline pointer address(reference r) { return &r; } + + /*! Returns the address an allocated object. + * \return &r. + */ + __host__ __device__ + inline const_pointer address(const_reference r) { return &r; } + + /*! Allocates storage for \p cnt objects. + * \param cnt The number of objects to allocate. + * \return A \p pointer to uninitialized storage for \p cnt objects. + * \note Memory allocated by this function must be deallocated with \p deallocate. + */ + __host__ + inline pointer allocate(size_type cnt, + const_pointer = const_pointer(static_cast(0))) + { + if(cnt > this->max_size()) + { + throw std::bad_alloc(); + } // end if + + // use "::operator new" rather than keyword new + return pointer(device_new(cnt)); + } // end allocate() + + /*! Deallocates storage for objects allocated with \p allocate. + * \param p A \p pointer to the storage to deallocate. + * \param cnt The size of the previous allocation. + * \note Memory deallocated by this function must previously have been + * allocated with \p allocate. + */ + __host__ + inline void deallocate(pointer p, size_type cnt) + { + // use "::operator delete" rather than keyword delete + (void)cnt; + device_delete(p); + } // end deallocate() + + /*! Returns the largest value \c n for which allocate(n) might succeed. 
+ * \return The largest value \c n for which allocate(n) might succeed. + */ + __host__ __device__ + inline size_type max_size() const + { + return ::cuda::std::numeric_limits::max THRUST_PREVENT_MACRO_SUBSTITUTION () / sizeof(T); + } // end max_size() + + /*! Compares against another \p device_malloc_allocator for equality. + * \return \c true + */ + __host__ __device__ + inline bool operator==(device_new_allocator const&) { return true; } + + /*! Compares against another \p device_malloc_allocator for inequality. + * \return \c false + */ + __host__ __device__ + inline bool operator!=(device_new_allocator const &a) {return !operator==(a); } +}; // end device_new_allocator + +/*! \} // allocators + */ + +THRUST_NAMESPACE_END diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_reference.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_reference.h new file mode 100644 index 0000000000000000000000000000000000000000..512ab4c60c36cb030b579881008de44c1b17fb08 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_reference.h @@ -0,0 +1,987 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! 
\file + * \brief A reference to an object which resides in memory associated with the + * device system. + */ + +#pragma once + +#include +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup memory_management Memory Management + * \{ + */ + +/*! \p device_reference acts as a reference-like object to an object stored in device memory. + * \p device_reference is not intended to be used directly; rather, this type + * is the result of deferencing a \p device_ptr. Similarly, taking the address of + * a \p device_reference yields a \p device_ptr. + * + * \p device_reference may often be used from host code in place of operations defined on + * its associated \c value_type. For example, when \p device_reference refers to an + * arithmetic type, arithmetic operations on it are legal: + * + * \code + * #include + * + * int main(void) + * { + * thrust::device_vector vec(1, 13); + * + * thrust::device_reference ref_to_thirteen = vec[0]; + * + * int x = ref_to_thirteen + 1; + * + * // x is 14 + * + * return 0; + * } + * \endcode + * + * Similarly, we can print the value of \c ref_to_thirteen in the above code by using an + * \c iostream: + * + * \code + * #include + * #include + * + * int main(void) + * { + * thrust::device_vector vec(1, 13); + * + * thrust::device_reference ref_to_thirteen = vec[0]; + * + * std::cout << ref_to_thirteen << std::endl; + * + * // 13 is printed + * + * return 0; + * } + * \endcode + * + * Of course, we needn't explicitly create a \p device_reference in the previous + * example, because one is returned by \p device_vector's bracket operator. 
A more natural + * way to print the value of a \p device_vector element might be: + * + * \code + * #include + * #include + * + * int main(void) + * { + * thrust::device_vector vec(1, 13); + * + * std::cout << vec[0] << std::endl; + * + * // 13 is printed + * + * return 0; + * } + * \endcode + * + * These kinds of operations should be used sparingly in performance-critical code, because + * they imply a potentially expensive copy between host and device space. + * + * Some operations which are possible with regular objects are impossible with their + * corresponding \p device_reference objects due to the requirements of the C++ language. For + * example, because the member access operator cannot be overloaded, member variables and functions + * of a referent object cannot be directly accessed through its \p device_reference. + * + * The following code, which generates a compiler error, illustrates: + * + * \code + * #include + * + * struct foo + * { + * int x; + * }; + * + * int main(void) + * { + * thrust::device_vector foo_vec(1); + * + * thrust::device_reference foo_ref = foo_vec[0]; + * + * foo_ref.x = 13; // ERROR: x cannot be accessed through foo_ref + * + * return 0; + * } + * \endcode + * + * Instead, a host space copy must be created to access \c foo's \c x member: + * + * \code + * #include + * + * struct foo + * { + * int x; + * }; + * + * int main(void) + * { + * thrust::device_vector foo_vec(1); + * + * // create a local host-side foo object + * foo host_foo; + * host_foo.x = 13; + * + * thrust::device_reference foo_ref = foo_vec[0]; + * + * foo_ref = host_foo; + * + * // foo_ref's x member is 13 + * + * return 0; + * } + * \endcode + * + * Another common case where a \p device_reference cannot directly be used in place of + * its referent object occurs when passing them as parameters to functions like \c printf + * which have varargs parameters. 
Because varargs parameters must be Plain Old Data, a + * \p device_reference to a POD type requires a cast when passed to \c printf: + * + * \code + * #include + * #include + * + * int main(void) + * { + * thrust::device_vector vec(1,13); + * + * // vec[0] must be cast to int when passing to printf + * printf("%d\n", (int) vec[0]); + * + * return 0; + * } + * \endcode + * + * \see device_ptr + * \see device_vector + */ +template + class device_reference + : public thrust::reference< + T, + thrust::device_ptr, + thrust::device_reference + > +{ + private: + typedef thrust::reference< + T, + thrust::device_ptr, + thrust::device_reference + > super_t; + + public: + /*! The type of the value referenced by this type of \p device_reference. + */ + typedef typename super_t::value_type value_type; + + /*! The type of the expression &ref, where ref is a \p device_reference. + */ + typedef typename super_t::pointer pointer; + + /*! This copy constructor accepts a const reference to another + * \p device_reference. After this \p device_reference is constructed, + * it shall refer to the same object as \p other. + * + * \param other A \p device_reference to copy from. + * + * The following code snippet demonstrates the semantics of this + * copy constructor. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,0); + * thrust::device_reference ref = v[0]; + * + * // ref equals the object at v[0] + * assert(ref == v[0]); + * + * // the address of ref equals the address of v[0] + * assert(&ref == &v[0]); + * + * // modifying v[0] modifies ref + * v[0] = 13; + * assert(ref == 13); + * \endcode + * + * \note This constructor is templated primarily to allow initialization of + * device_reference from device_reference. + */ + template + __host__ __device__ + device_reference(const device_reference &other, + typename thrust::detail::enable_if_convertible< + typename device_reference::pointer, + pointer + >::type * = 0) + : super_t(other) + {} + + /*! 
This copy constructor initializes this \p device_reference + * to refer to an object pointed to by the given \p device_ptr. After + * this \p device_reference is constructed, it shall refer to the + * object pointed to by \p ptr. + * + * \param ptr A \p device_ptr to copy from. + * + * The following code snippet demonstrates the semantic of this + * copy constructor. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,0); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals the object pointed to by ptr + * assert(ref == *ptr); + * + * // the address of ref equals ptr + * assert(&ref == ptr); + * + * // modifying *ptr modifies ref + * *ptr = 13; + * assert(ref == 13); + * \endcode + */ + __host__ __device__ + explicit device_reference(const pointer &ptr) + : super_t(ptr) + {} + + /*! This assignment operator assigns the value of the object referenced by + * the given \p device_reference to the object referenced by this + * \p device_reference. + * + * \param other The \p device_reference to assign from. + * \return *this + */ + template + __host__ __device__ + device_reference &operator=(const device_reference &other) + { + return super_t::operator=(other); + } + + /*! Assignment operator assigns the value of the given value to the + * value referenced by this \p device_reference. + * + * \param x The value to assign from. + * \return *this + */ + __host__ __device__ + device_reference &operator=(const value_type &x) + { + return super_t::operator=(x); + } + +// declare these members for the purpose of Doxygenating them +// they actually exist in a derived-from class +#if 0 + /*! Address-of operator returns a \p device_ptr pointing to the object + * referenced by this \p device_reference. It does not return the + * address of this \p device_reference. + * + * \return A \p device_ptr pointing to the object this + * \p device_reference references. 
+ */ + __host__ __device__ + pointer operator&(void) const; + + /*! Conversion operator converts this \p device_reference to T + * by returning a copy of the object referenced by this + * \p device_reference. + * + * \return A copy of the object referenced by this \p device_reference. + */ + __host__ __device__ + operator value_type (void) const; + + /*! swaps the value this \p device_reference references with another. + * \p other The other \p device_reference with which to swap. + */ + __host__ __device__ + void swap(device_reference &other); + + /*! Prefix increment operator increments the object referenced by this + * \p device_reference. + * + * \return *this + * + * The following code snippet demonstrates the semantics of + * \p device_reference's prefix increment operator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,0); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 0 + * assert(ref == 0); + * + * // the object pointed to by ptr equals 1 + * assert(*ptr == 1); + * + * // v[0] equals 1 + * assert(v[0] == 1); + * + * // increment ref + * ++ref; + * + * // ref equals 1 + * assert(ref == 1); + * + * // the object pointed to by ptr equals 1 + * assert(*ptr == 1); + * + * // v[0] equals 1 + * assert(v[0] == 1); + * \endcode + * + * \note The increment executes as if it were executed on the host. + * This may change in a later version. + */ + device_reference &operator++(void); + + /*! Postfix increment operator copies the object referenced by this + * \p device_reference, increments the object referenced by this + * \p device_reference, and returns the copy. + * + * \return A copy of the object referenced by this \p device_reference + * before being incremented. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's postfix increment operator. + * + * \code + * #include + * #include + * ... 
+ * thrust::device_vector v(1,0); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 0 + * assert(ref == 0); + * + * // the object pointed to by ptr equals 0 + * assert(*ptr == 0); + * + * // v[0] equals 0 + * assert(v[0] == 0); + * + * // increment ref + * int x = ref++; + * + * // x equals 0 + * assert(x == 0) + * + * // ref equals 1 + * assert(ref == 1); + * + * // the object pointed to by ptr equals 1 + * assert(*ptr == 1); + * + * // v[0] equals 1 + * assert(v[0] == 1); + * \endcode + * + * \note The increment executes as if it were executed on the host. + * This may change in a later version. + */ + value_type operator++(int); + + /*! Addition assignment operator add-assigns the object referenced by this + * \p device_reference and returns this \p device_reference. + * + * \param rhs The right hand side of the add-assignment. + * \return *this. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's addition assignment operator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,0); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 0 + * assert(ref == 0); + * + * // the object pointed to by ptr equals 0 + * assert(*ptr == 0); + * + * // v[0] equals 0 + * assert(v[0] == 0); + * + * // add-assign ref + * ref += 5; + * + * // ref equals 5 + * assert(ref == 5); + * + * // the object pointed to by ptr equals 5 + * assert(*ptr == 5); + * + * // v[0] equals 5 + * assert(v[0] == 5); + * \endcode + * + * \note The add-assignment executes as as if it were executed on the host. + * This may change in a later version. + */ + device_reference &operator+=(const T &rhs); + + /*! Prefix decrement operator decrements the object referenced by this + * \p device_reference. + * + * \return *this + * + * The following code snippet demonstrates the semantics of + * \p device_reference's prefix decrement operator. 
+ * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,0); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 0 + * assert(ref == 0); + * + * // the object pointed to by ptr equals 0 + * assert(*ptr == 0); + * + * // v[0] equals 0 + * assert(v[0] == 0); + * + * // decrement ref + * --ref; + * + * // ref equals -1 + * assert(ref == -1); + * + * // the object pointed to by ptr equals -1 + * assert(*ptr == -1); + * + * // v[0] equals -1 + * assert(v[0] == -1); + * \endcode + * + * \note The decrement executes as if it were executed on the host. + * This may change in a later version. + */ + device_reference &operator--(void); + + /*! Postfix decrement operator copies the object referenced by this + * \p device_reference, decrements the object referenced by this + * \p device_reference, and returns the copy. + * + * \return A copy of the object referenced by this \p device_reference + * before being decremented. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's postfix decrement operator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,0); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 0 + * assert(ref == 0); + * + * // the object pointed to by ptr equals 0 + * assert(*ptr == 0); + * + * // v[0] equals 0 + * assert(v[0] == 0); + * + * // decrement ref + * int x = ref--; + * + * // x equals 0 + * assert(x == 0) + * + * // ref equals -1 + * assert(ref == -1); + * + * // the object pointed to by ptr equals -1 + * assert(*ptr == -1); + * + * // v[0] equals -1 + * assert(v[0] == -1); + * \endcode + * + * \note The decrement executes as if it were executed on the host. + * This may change in a later version. + */ + value_type operator--(int); + + /*! Subtraction assignment operator subtract-assigns the object referenced by this + * \p device_reference and returns this \p device_reference. 
+ * + * \param rhs The right hand side of the subtraction-assignment. + * \return *this. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's addition assignment operator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,0); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 0 + * assert(ref == 0); + * + * // the object pointed to by ptr equals 0 + * assert(*ptr == 0); + * + * // v[0] equals 0 + * assert(v[0] == 0); + * + * // subtract-assign ref + * ref -= 5; + * + * // ref equals -5 + * assert(ref == -5); + * + * // the object pointed to by ptr equals -5 + * assert(*ptr == -5); + * + * // v[0] equals -5 + * assert(v[0] == -5); + * \endcode + * + * \note The subtract-assignment executes as as if it were executed on the host. + * This may change in a later version. + */ + device_reference &operator-=(const T &rhs); + + /*! Multiplication assignment operator multiply-assigns the object referenced by this + * \p device_reference and returns this \p device_reference. + * + * \param rhs The right hand side of the multiply-assignment. + * \return *this. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's multiply assignment operator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,1); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 1 + * assert(ref == 1); + * + * // the object pointed to by ptr equals 1 + * assert(*ptr == 1); + * + * // v[0] equals 1 + * assert(v[0] == 1); + * + * // multiply-assign ref + * ref *= 5; + * + * // ref equals 5 + * assert(ref == 5); + * + * // the object pointed to by ptr equals 5 + * assert(*ptr == 5); + * + * // v[0] equals 5 + * assert(v[0] == 5); + * \endcode + * + * \note The multiply-assignment executes as as if it were executed on the host. + * This may change in a later version. 
+ */ + device_reference &operator*=(const T &rhs); + + /*! Division assignment operator divide-assigns the object referenced by this + * \p device_reference and returns this \p device_reference. + * + * \param rhs The right hand side of the divide-assignment. + * \return *this. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's divide assignment operator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,5); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 5 + * assert(ref == 5); + * + * // the object pointed to by ptr equals 5 + * assert(*ptr == 5); + * + * // v[0] equals 5 + * assert(v[0] == 5); + * + * // divide-assign ref + * ref /= 5; + * + * // ref equals 1 + * assert(ref == 1); + * + * // the object pointed to by ptr equals 1 + * assert(*ptr == 1); + * + * // v[0] equals 1 + * assert(v[0] == 1); + * \endcode + * + * \note The divide-assignment executes as as if it were executed on the host. + * This may change in a later version. + */ + device_reference &operator/=(const T &rhs); + + /*! Modulation assignment operator modulus-assigns the object referenced by this + * \p device_reference and returns this \p device_reference. + * + * \param rhs The right hand side of the divide-assignment. + * \return *this. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's divide assignment operator. + * + * \code + * #include + * #include + * ... 
+ * thrust::device_vector v(1,5); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 5 + * assert(ref == 5); + * + * // the object pointed to by ptr equals 5 + * assert(*ptr == 5); + * + * // v[0] equals 5 + * assert(v[0] == 5); + * + * // modulus-assign ref + * ref %= 5; + * + * // ref equals 0 + * assert(ref == 0); + * + * // the object pointed to by ptr equals 0 + * assert(*ptr == 0); + * + * // v[0] equals 0 + * assert(v[0] == 0); + * \endcode + * + * \note The modulus-assignment executes as as if it were executed on the host. + * This may change in a later version. + */ + device_reference &operator%=(const T &rhs); + + /*! Bitwise left shift assignment operator left shift-assigns the object referenced by this + * \p device_reference and returns this \p device_reference. + * + * \param rhs The right hand side of the left shift-assignment. + * \return *this. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's left shift assignment operator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,1); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 1 + * assert(ref == 1); + * + * // the object pointed to by ptr equals 1 + * assert(*ptr == 1); + * + * // v[0] equals 1 + * assert(v[0] == 1); + * + * // left shift-assign ref + * ref <<= 1; + * + * // ref equals 2 + * assert(ref == 2); + * + * // the object pointed to by ptr equals 2 + * assert(*ptr == 2); + * + * // v[0] equals 2 + * assert(v[0] == 2); + * \endcode + * + * \note The left shift-assignment executes as as if it were executed on the host. + * This may change in a later version. + */ + device_reference &operator<<=(const T &rhs); + + /*! Bitwise right shift assignment operator right shift-assigns the object referenced by this + * \p device_reference and returns this \p device_reference. 
+ * + * \param rhs The right hand side of the right shift-assignment. + * \return *this. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's right shift assignment operator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,2); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 2 + * assert(ref == 2); + * + * // the object pointed to by ptr equals 2 + * assert(*ptr == 2); + * + * // v[0] equals 2 + * assert(v[0] == 2); + * + * // right shift-assign ref + * ref >>= 1; + * + * // ref equals 1 + * assert(ref == 1); + * + * // the object pointed to by ptr equals 1 + * assert(*ptr == 1); + * + * // v[0] equals 1 + * assert(v[0] == 1); + * \endcode + * + * \note The right shift-assignment executes as as if it were executed on the host. + * This may change in a later version. + */ + device_reference &operator>>=(const T &rhs); + + /*! Bitwise AND assignment operator AND-assigns the object referenced by this + * \p device_reference and returns this \p device_reference. + * + * \param rhs The right hand side of the AND-assignment. + * \return *this. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's AND assignment operator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,1); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 1 + * assert(ref == 1); + * + * // the object pointed to by ptr equals 1 + * assert(*ptr == 1); + * + * // v[0] equals 1 + * assert(v[0] == 1); + * + * // right AND-assign ref + * ref &= 0; + * + * // ref equals 0 + * assert(ref == 0); + * + * // the object pointed to by ptr equals 0 + * assert(*ptr == 0); + * + * // v[0] equals 0 + * assert(v[0] == 0); + * \endcode + * + * \note The AND-assignment executes as as if it were executed on the host. + * This may change in a later version. 
+ */ + device_reference &operator&=(const T &rhs); + + /*! Bitwise OR assignment operator OR-assigns the object referenced by this + * \p device_reference and returns this \p device_reference. + * + * \param rhs The right hand side of the OR-assignment. + * \return *this. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's OR assignment operator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(1,0); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 0 + * assert(ref == 0); + * + * // the object pointed to by ptr equals 0 + * assert(*ptr == 0); + * + * // v[0] equals 0 + * assert(v[0] == 0); + * + * // right OR-assign ref + * ref |= 1; + * + * // ref equals 1 + * assert(ref == 1); + * + * // the object pointed to by ptr equals 1 + * assert(*ptr == 1); + * + * // v[0] equals 1 + * assert(v[0] == 1); + * \endcode + * + * \note The OR-assignment executes as as if it were executed on the host. + * This may change in a later version. + */ + device_reference &operator|=(const T &rhs); + + /*! Bitwise XOR assignment operator XOR-assigns the object referenced by this + * \p device_reference and returns this \p device_reference. + * + * \param rhs The right hand side of the XOR-assignment. + * \return *this. + * + * The following code snippet demonstrates the semantics of + * \p device_reference's XOR assignment operator. + * + * \code + * #include + * #include + * ... 
+ * thrust::device_vector v(1,1); + * thrust::device_ptr ptr = &v[0]; + * thrust::device_reference ref(ptr); + * + * // ref equals 1 + * assert(ref == 1); + * + * // the object pointed to by ptr equals 1 + * assert(*ptr == 1); + * + * // v[0] equals 1 + * assert(v[0] == 1); + * + * // right XOR-assign ref + * ref ^= 1; + * + * // ref equals 0 + * assert(ref == 0); + * + * // the object pointed to by ptr equals 0 + * assert(*ptr == 0); + * + * // v[0] equals 0 + * assert(v[0] == 0); + * \endcode + * + * \note The XOR-assignment executes as as if it were executed on the host. + * This may change in a later version. + */ + device_reference &operator^=(const T &rhs); +#endif // end doxygen-only members +}; // end device_reference + +/*! swaps the value of one \p device_reference with another. + * \p x The first \p device_reference of interest. + * \p y The second \p device_reference of interest. + */ +template +__host__ __device__ +void swap(device_reference& x, device_reference& y) +{ + x.swap(y); +} + +// declare these methods for the purpose of Doxygenating them +// they actually are defined for a derived-from class +#if THRUST_DOXYGEN +/*! Writes to an output stream the value of a \p device_reference. + * + * \param os The output stream. + * \param y The \p device_reference to output. + * \return os. + */ +template +std::basic_ostream & +operator<<(std::basic_ostream &os, const device_reference &y); +#endif + +/*! 
\} // memory_management + */ + +THRUST_NAMESPACE_END diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_vector.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_vector.h new file mode 100644 index 0000000000000000000000000000000000000000..9b97e8d70335231e05891edd1a5bed05f96bdf25 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_vector.h @@ -0,0 +1,511 @@ +/* + * Copyright 2008-2018 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file + * \brief A dynamically-sizable array of elements which resides in memory + * accessible to devices. + */ + +#pragma once + +#include +#include +#include + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup containers Containers + * \{ + */ + +/*! A \p device_vector is a container that supports random access to elements, + * constant time removal of elements at the end, and linear time insertion + * and removal of elements at the beginning or in the middle. The number of + * elements in a \p device_vector may vary dynamically; memory management is + * automatic. The memory associated with a \p device_vector resides in the + * memory accessible to devices. 
+ * + * \see https://en.cppreference.com/w/cpp/container/vector + * \see device_allocator + * \see host_vector + * \see universal_vector + */ +template > + class device_vector + : public detail::vector_base +{ + private: + typedef detail::vector_base Parent; + + public: + /*! \cond + */ + typedef typename Parent::size_type size_type; + typedef typename Parent::value_type value_type; + /*! \endcond + */ + + /*! This constructor creates an empty \p device_vector. + */ + device_vector(void) + :Parent() {} + + /*! This constructor creates an empty \p device_vector. + * \param alloc The allocator to use by this device_vector. + */ + device_vector(const Alloc &alloc) + :Parent(alloc) {} + + /*! The destructor erases the elements. + */ + // Define an empty destructor to explicitly specify + // its execution space qualifier, as a workaround for nvcc warning + ~device_vector(void) {} + + /*! This constructor creates a \p device_vector with the given + * size. + * \param n The number of elements to initially create. + */ + explicit device_vector(size_type n) + :Parent(n) {} + + /*! This constructor creates a \p device_vector with the given + * size. + * \param n The number of elements to initially create. + * \param alloc The allocator to use by this device_vector. + */ + explicit device_vector(size_type n, const Alloc &alloc) + :Parent(n,alloc) {} + + /*! This constructor creates a \p device_vector with copies + * of an exemplar element. + * \param n The number of elements to initially create. + * \param value An element to copy. + */ + explicit device_vector(size_type n, const value_type &value) + :Parent(n,value) {} + + /*! This constructor creates a \p device_vector with copies + * of an exemplar element. + * \param n The number of elements to initially create. + * \param value An element to copy. + * \param alloc The allocator to use by this device_vector. 
+ */ + explicit device_vector(size_type n, const value_type &value, const Alloc &alloc) + :Parent(n,value,alloc) {} + + /*! Copy constructor copies from an exemplar \p device_vector. + * \param v The \p device_vector to copy. + */ + device_vector(const device_vector &v) + :Parent(v) {} + + /*! Copy constructor copies from an exemplar \p device_vector. + * \param v The \p device_vector to copy. + * \param alloc The allocator to use by this device_vector. + */ + device_vector(const device_vector &v, const Alloc &alloc) + :Parent(v,alloc) {} + + #if THRUST_CPP_DIALECT >= 2011 + /*! Move constructor moves from another \p device_vector. + * \param v The device_vector to move. + */ + device_vector(device_vector &&v) + :Parent(std::move(v)) {} + + /*! Move constructor moves from another \p device_vector. + * \param v The device_vector to move. + * \param alloc The allocator to use by this device_vector. + */ + device_vector(device_vector &&v, const Alloc &alloc) + :Parent(std::move(v), alloc) {} + #endif // THRUST_CPP_DIALECT >= 2011 + + /*! Copy assign operator copies another \p device_vector with the same type. + * \param v The \p device_vector to copy. + */ + device_vector &operator=(const device_vector &v) + { Parent::operator=(v); return *this; } + + #if THRUST_CPP_DIALECT >= 2011 + /*! Move assign operator moves from another \p device_vector. + * \param v The device_vector to move. + */ + device_vector &operator=(device_vector &&v) + { Parent::operator=(std::move(v)); return *this; } + #endif // THRUST_CPP_DIALECT >= 2011 + + /*! Copy constructor copies from an exemplar \p device_vector with different type. + * \param v The \p device_vector to copy. + */ + template + explicit device_vector(const device_vector &v) + :Parent(v) {} + + /*! Assign operator copies from an exemplar \p device_vector with different type. + * \param v The \p device_vector to copy. 
+ */ + template + device_vector &operator=(const device_vector &v) + { Parent::operator=(v); return *this; } + + /*! Copy constructor copies from an exemplar \c std::vector. + * \param v The std::vector to copy. + */ + template + device_vector(const std::vector &v) + :Parent(v) {} + + /*! Assign operator copies from an exemplar std::vector. + * \param v The std::vector to copy. + */ + template + device_vector &operator=(const std::vector &v) + { Parent::operator=(v); return *this;} + + /*! Copy construct from a \p vector_base whose element type is convertible + * to \c T. + * + * \param v The \p vector_base to copy. + */ + template + device_vector(const detail::vector_base &v) + :Parent(v) {} + + /*! Assign a \p vector_base whose element type is convertible to \c T. + * \param v The \p vector_base to copy. + */ + template + device_vector &operator=(const detail::vector_base &v) + { Parent::operator=(v); return *this; } + + /*! This constructor builds a \p device_vector from an intializer_list. + * \param il The intializer_list. + */ + device_vector(std::initializer_list il) + :Parent(il) {} + + /*! This constructor builds a \p device_vector from an intializer_list. + * \param il The intializer_list. + * \param alloc The allocator to use by this device_vector. + */ + device_vector(std::initializer_list il, const Alloc &alloc) + :Parent(il, alloc) {} + + /*! Assign an \p intializer_list with a matching element type + * \param il The intializer_list. + */ + device_vector &operator=(std::initializer_list il) + { Parent::operator=(il); return *this; } + + /*! This constructor builds a \p device_vector from a range. + * \param first The beginning of the range. + * \param last The end of the range. + */ + template + device_vector(InputIterator first, InputIterator last) + :Parent(first,last) {} + + /*! This constructor builds a \p device_vector from a range. + * \param first The beginning of the range. + * \param last The end of the range. 
+ * \param alloc The allocator to use by this device_vector. + */ + template + device_vector(InputIterator first, InputIterator last, const Alloc &alloc) + :Parent(first,last,alloc) {} + +// declare these members for the purpose of Doxygenating them +// they actually exist in a derived-from class +#if 0 + /*! \brief Resizes this vector to the specified number of elements. + * \param new_size Number of elements this vector should contain. + * \param x Data with which new elements should be populated. + * \throw std::length_error If n exceeds max_size(). + * + * This method will resize this vector to the specified number of + * elements. If the number is smaller than this vector's current + * size this vector is truncated, otherwise this vector is + * extended and new elements are populated with given data. + */ + void resize(size_type new_size, const value_type &x = value_type()); + + /*! Returns the number of elements in this vector. + */ + size_type size(void) const; + + /*! Returns the size() of the largest possible vector. + * \return The largest possible return value of size(). + */ + size_type max_size(void) const; + + /*! \brief If n is less than or equal to capacity(), this call has no effect. + * Otherwise, this method is a request for allocation of additional memory. If + * the request is successful, then capacity() is greater than or equal to + * n; otherwise, capacity() is unchanged. In either case, size() is unchanged. + * \throw std::length_error If n exceeds max_size(). + */ + void reserve(size_type n); + + /*! Returns the number of elements which have been reserved in this + * vector. + */ + size_type capacity(void) const; + + /*! This method shrinks the capacity of this vector to exactly + * fit its elements. + */ + void shrink_to_fit(void); + + /*! \brief Subscript access to the data contained in this vector_dev. + * \param n The index of the element for which data should be accessed. + * \return Read/write reference to data. 
+ * + * This operator allows for easy, array-style, data access. + * Note that data access with this operator is unchecked and + * out_of_range lookups are not defined. + */ + reference operator[](size_type n); + + /*! \brief Subscript read access to the data contained in this vector_dev. + * \param n The index of the element for which data should be accessed. + * \return Read reference to data. + * + * This operator allows for easy, array-style, data access. + * Note that data access with this operator is unchecked and + * out_of_range lookups are not defined. + */ + const_reference operator[](size_type n) const; + + /*! This method returns an iterator pointing to the beginning of + * this vector. + * \return mStart + */ + iterator begin(void); + + /*! This method returns a const_iterator pointing to the beginning + * of this vector. + * \return mStart + */ + const_iterator begin(void) const; + + /*! This method returns a const_iterator pointing to the beginning + * of this vector. + * \return mStart + */ + const_iterator cbegin(void) const; + + /*! This method returns a reverse_iterator pointing to the beginning of + * this vector's reversed sequence. + * \return A reverse_iterator pointing to the beginning of this + * vector's reversed sequence. + */ + reverse_iterator rbegin(void); + + /*! This method returns a const_reverse_iterator pointing to the beginning of + * this vector's reversed sequence. + * \return A const_reverse_iterator pointing to the beginning of this + * vector's reversed sequence. + */ + const_reverse_iterator rbegin(void) const; + + /*! This method returns a const_reverse_iterator pointing to the beginning of + * this vector's reversed sequence. + * \return A const_reverse_iterator pointing to the beginning of this + * vector's reversed sequence. + */ + const_reverse_iterator crbegin(void) const; + + /*! This method returns an iterator pointing to one element past the + * last of this vector. + * \return begin() + size(). 
+ */ + iterator end(void); + + /*! This method returns a const_iterator pointing to one element past the + * last of this vector. + * \return begin() + size(). + */ + const_iterator end(void) const; + + /*! This method returns a const_iterator pointing to one element past the + * last of this vector. + * \return begin() + size(). + */ + const_iterator cend(void) const; + + /*! This method returns a reverse_iterator pointing to one element past the + * last of this vector's reversed sequence. + * \return rbegin() + size(). + */ + reverse_iterator rend(void); + + /*! This method returns a const_reverse_iterator pointing to one element past the + * last of this vector's reversed sequence. + * \return rbegin() + size(). + */ + const_reverse_iterator rend(void) const; + + /*! This method returns a const_reverse_iterator pointing to one element past the + * last of this vector's reversed sequence. + * \return rbegin() + size(). + */ + const_reverse_iterator crend(void) const; + + /*! This method returns a const_reference referring to the first element of this + * vector. + * \return The first element of this vector. + */ + const_reference front(void) const; + + /*! This method returns a reference pointing to the first element of this + * vector. + * \return The first element of this vector. + */ + reference front(void); + + /*! This method returns a const reference pointing to the last element of + * this vector. + * \return The last element of this vector. + */ + const_reference back(void) const; + + /*! This method returns a reference referring to the last element of + * this vector_dev. + * \return The last element of this vector. + */ + reference back(void); + + /*! This method returns a pointer to this vector's first element. + * \return A pointer to the first element of this vector. + */ + pointer data(void); + + /*! This method returns a const_pointer to this vector's first element. + * \return a const_pointer to the first element of this vector. 
+ */ + const_pointer data(void) const; + + /*! This method resizes this vector to 0. + */ + void clear(void); + + /*! This method returns true iff size() == 0. + * \return true if size() == 0; false, otherwise. + */ + bool empty(void) const; + + /*! This method appends the given element to the end of this vector. + * \param x The element to append. + */ + void push_back(const value_type &x); + + /*! This method erases the last element of this vector, invalidating + * all iterators and references to it. + */ + void pop_back(void); + + /*! This method swaps the contents of this device_vector with another vector. + * \param v The vector with which to swap. + */ + void swap(device_vector &v); + + /*! This method removes the element at position pos. + * \param pos The position of the element of interest. + * \return An iterator pointing to the new location of the element that followed the element + * at position pos. + */ + iterator erase(iterator pos); + + /*! This method removes the range of elements [first,last) from this vector. + * \param first The beginning of the range of elements to remove. + * \param last The end of the range of elements to remove. + * \return An iterator pointing to the new location of the element that followed the last + * element in the sequence [first,last). + */ + iterator erase(iterator first, iterator last); + + /*! This method inserts a single copy of a given exemplar value at the + * specified position in this vector. + * \param position The insertion position. + * \param x The exemplar element to copy & insert. + * \return An iterator pointing to the newly inserted element. + */ + iterator insert(iterator position, const T &x); + + /*! This method inserts a copy of an exemplar value to a range at the + * specified position in this vector. + * \param position The insertion position + * \param n The number of insertions to perform. + * \param x The value to replicate and insert. 
+ */ + void insert(iterator position, size_type n, const T &x); + + /*! This method inserts a copy of an input range at the specified position + * in this vector. + * \param position The insertion position. + * \param first The beginning of the range to copy. + * \param last The end of the range to copy. + * + * \tparam InputIterator is a model of Assignable. + */ + template + void insert(iterator position, InputIterator first, InputIterator last); + + /*! This version of \p assign replicates a given exemplar + * \p n times into this vector. + * \param n The number of times to copy \p x. + * \param x The exemplar element to replicate. + */ + void assign(size_type n, const T &x); + + /*! This version of \p assign makes this vector a copy of a given input range. + * \param first The beginning of the range to copy. + * \param last The end of the range to copy. + * + * \tparam InputIterator is a model of Input Iterator. + */ + template + void assign(InputIterator first, InputIterator last); + + /*! This method returns a copy of this vector's allocator. + * \return A copy of the alloctor used by this vector. + */ + allocator_type get_allocator(void) const; +#endif // end doxygen-only members +}; + +/*! Exchanges the values of two vectors. + * \p x The first \p device_vector of interest. + * \p y The second \p device_vector of interest. + */ +template + void swap(device_vector &a, device_vector &b) +{ + a.swap(b); +} + +/*! 
\} // containres + */ + +THRUST_NAMESPACE_END diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/equal.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/equal.h new file mode 100644 index 0000000000000000000000000000000000000000..2f3518907895e7baefbef4e873895bd40306b504 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/equal.h @@ -0,0 +1,235 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file equal.h + * \brief Equality between ranges + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup reductions + * \{ + * \addtogroup comparisons + * \ingroup reductions + * \{ + */ + + +/*! \p equal returns \c true if the two ranges [first1, last1) + * and [first2, first2 + (last1 - first1)) are identical when + * compared element-by-element, and otherwise returns \c false. + * + * This version of \p equal returns \c true if and only if for every + * iterator \c i in [first1, last1), *i == *(first2 + (i - first1)). + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first sequence. + * \param last1 The end of the first sequence. 
+ * \param first2 The beginning of the second sequence. + * \return \c true, if the sequences are equal; \c false, otherwise. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * and \p InputIterator1's \c value_type is a model of Equality Comparable, + * and \p InputIterator1's \c value_type can be compared for equality with \c InputIterator2's \c value_type. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is a model of Equality Comparable, + * and \p InputIterator2's \c value_type can be compared for equality with \c InputIterator1's \c value_type. + * + * The following code snippet demonstrates how to use \p equal to test + * two ranges for equality using the \p thrust::host execution policy: + * + * \code + * #include + * #include + * ... + * int A1[7] = {3, 1, 4, 1, 5, 9, 3}; + * int A2[7] = {3, 1, 4, 2, 8, 5, 7}; + * ... + * bool result = thrust::equal(thrust::host, A1, A1 + 7, A2); + * + * // result == false + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/equal + */ +template +__host__ __device__ +bool equal(const thrust::detail::execution_policy_base &exec, InputIterator1 first1, InputIterator1 last1, InputIterator2 first2); + + +/*! \p equal returns \c true if the two ranges [first1, last1) + * and [first2, first2 + (last1 - first1)) are identical when + * compared element-by-element, and otherwise returns \c false. + * + * This version of \p equal returns \c true if and only if for every + * iterator \c i in [first1, last1), *i == *(first2 + (i - first1)). + * + * \param first1 The beginning of the first sequence. + * \param last1 The end of the first sequence. + * \param first2 The beginning of the second sequence. + * \return \c true, if the sequences are equal; \c false, otherwise. 
+ * + * \tparam InputIterator1 is a model of Input Iterator, + * and \p InputIterator1's \c value_type is a model of Equality Comparable, + * and \p InputIterator1's \c value_type can be compared for equality with \c InputIterator2's \c value_type. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is a model of Equality Comparable, + * and \p InputIterator2's \c value_type can be compared for equality with \c InputIterator1's \c value_type. + * + * The following code snippet demonstrates how to use \p equal to test + * two ranges for equality. + * + * \code + * #include + * ... + * int A1[7] = {3, 1, 4, 1, 5, 9, 3}; + * int A2[7] = {3, 1, 4, 2, 8, 5, 7}; + * ... + * bool result = thrust::equal(A1, A1 + 7, A2); + * + * // result == false + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/equal + */ +template +bool equal(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2); + + +/*! \p equal returns \c true if the two ranges [first1, last1) + * and [first2, first2 + (last1 - first1)) are identical when + * compared element-by-element, and otherwise returns \c false. + * + * This version of \p equal returns \c true if and only if for every + * iterator \c i in [first1, last1), + * binary_pred(*i, *(first2 + (i - first1))) is \c true. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first sequence. + * \param last1 The end of the first sequence. + * \param first2 The beginning of the second sequence. + * \param binary_pred Binary predicate used to test element equality. + * \return \c true, if the sequences are equal; \c false, otherwise. + * + * \tparam DerivedPolicy The name of the derived execution policy. 
+ * \tparam InputIterator1 is a model of Input Iterator, + * and \p InputIterator1's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is convertible to \p BinaryPredicate's \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p equal to compare the + * elements in two ranges modulo 2 using the \p thrust::host execution policy. + * + * \code + * #include + * #include + * ... + * + * struct compare_modulo_two + * { + * __host__ __device__ + * bool operator()(int x, int y) const + * { + * return (x % 2) == (y % 2); + * } + * }; + * ... + * int x[6] = {0, 2, 4, 6, 8, 10}; + * int y[6] = {1, 3, 5, 7, 9, 11}; + * + * bool result = thrust::equal(x, x + 6, y, compare_modulo_two()); + * + * // result is false + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/equal + */ +template +__host__ __device__ +bool equal(const thrust::detail::execution_policy_base &exec, InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, BinaryPredicate binary_pred); + + +/*! \p equal returns \c true if the two ranges [first1, last1) + * and [first2, first2 + (last1 - first1)) are identical when + * compared element-by-element, and otherwise returns \c false. + * + * This version of \p equal returns \c true if and only if for every + * iterator \c i in [first1, last1), + * binary_pred(*i, *(first2 + (i - first1))) is \c true. + * + * \param first1 The beginning of the first sequence. + * \param last1 The end of the first sequence. + * \param first2 The beginning of the second sequence. + * \param binary_pred Binary predicate used to test element equality. + * \return \c true, if the sequences are equal; \c false, otherwise. 
+ * + * \tparam InputIterator1 is a model of Input Iterator, + * and \p InputIterator1's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is convertible to \p BinaryPredicate's \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p equal to compare the + * elements in two ranges modulo 2. + * + * \code + * #include + * + * struct compare_modulo_two + * { + * __host__ __device__ + * bool operator()(int x, int y) const + * { + * return (x % 2) == (y % 2); + * } + * }; + * ... + * int x[6] = {0, 2, 4, 6, 8, 10}; + * int y[6] = {1, 3, 5, 7, 9, 11}; + * + * bool result = thrust::equal(x, x + 5, y, compare_modulo_two()); + * + * // result is true + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/equal + */ +template +bool equal(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, BinaryPredicate binary_pred); + + +/*! \} // end comparisons + * \} // end reductions + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/event.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/event.h new file mode 100644 index 0000000000000000000000000000000000000000..75578d96443c2e20c0b80b4d4efe28a6a5a5b04f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/event.h @@ -0,0 +1,26 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file thrust/event.h + * \brief `thrust::event`, an asynchronous handle type. + */ + +#pragma once + +#include + +// TODO: Actually separate `` into two headers. + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/execution_policy.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/execution_policy.h new file mode 100644 index 0000000000000000000000000000000000000000..ecf14413f604ad19c3904d341f467e86b4fbcf7a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/execution_policy.h @@ -0,0 +1,392 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file thrust/execution_policy.h + * \brief Thrust execution policies. + */ + +#pragma once + +#include +#include +#include +#include + +//! 
\cond + +// #include the host system's execution_policy header +#define __THRUST_HOST_SYSTEM_EXECUTION_POLICY_HEADER <__THRUST_HOST_SYSTEM_ROOT/execution_policy.h> +#include __THRUST_HOST_SYSTEM_EXECUTION_POLICY_HEADER +#undef __THRUST_HOST_SYSTEM_EXECUTION_POLICY_HEADER + +// #include the device system's execution_policy.h header +#define __THRUST_DEVICE_SYSTEM_EXECUTION_POLICY_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/execution_policy.h> +#include __THRUST_DEVICE_SYSTEM_EXECUTION_POLICY_HEADER +#undef __THRUST_DEVICE_SYSTEM_EXECUTION_POLICY_HEADER + +//! \endcond + +THRUST_NAMESPACE_BEGIN + +/*! \cond + */ + + +namespace detail +{ + + +typedef thrust::system::__THRUST_HOST_SYSTEM_NAMESPACE::detail::par_t host_t; + + +typedef thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::detail::par_t device_t; + + +} // end detail + + +/*! \endcond + */ + + +/*! \addtogroup execution_policies Parallel Execution Policies + * \{ + */ + + +// define execution_policy for the purpose of Doxygenating it +// it is actually defined elsewhere +#if 0 +/*! \p execution_policy is the base class for all Thrust parallel execution policies + * like \p thrust::host, \p thrust::device, and each backend system's tag type. + * + * Custom user-defined backends should derive a policy from this type in order to + * interoperate with Thrust algorithm dispatch. + * + * The following code snippet demonstrates how to derive a standalone custom execution policy + * from \p thrust::execution_policy to implement a backend which only implements \p for_each: + * + * \code + * #include + * #include + * + * // define a type derived from thrust::execution_policy to distinguish our custom execution policy: + * struct my_policy : thrust::execution_policy {}; + * + * // overload for_each on my_policy + * template + * Iterator for_each(my_policy, Iterator first, Iterator last, Function f) + * { + * std::cout << "Hello, world from for_each(my_policy)!" 
<< std::endl; + * + * for(; first < last; ++first) + * { + * f(*first); + * } + * + * return first; + * } + * + * struct ignore_argument + * { + * void operator()(int) {} + * }; + * + * int main() + * { + * int data[4]; + * + * // dispatch thrust::for_each using our custom policy: + * my_policy exec; + * thrust::for_each(exec, data, data + 4, ignore_argument()); + * + * // can't dispatch thrust::transform because no overload exists for my_policy: + * //thrust::transform(exec, data, data, + 4, data, thrust::identity()); // error! + * + * return 0; + * } + * \endcode + * + * \see host_execution_policy + * \see device_execution_policy + */ +template +struct execution_policy : thrust::detail::execution_policy_base +{}; +#endif + + +/*! \p host_execution_policy is the base class for all Thrust parallel execution policies + * which are derived from Thrust's default host backend system configured with the \p THRUST_HOST_SYSTEM + * macro. + * + * Custom user-defined backends which wish to inherit the functionality of Thrust's host backend system + * should derive a policy from this type in order to interoperate with Thrust algorithm dispatch. + * + * The following code snippet demonstrates how to derive a standalone custom execution policy from + * \p thrust::host_execution_policy to implement a backend which specializes \p for_each while inheriting + * the behavior of every other algorithm from the host system: + * + * \code + * #include + * #include + * + * // define a type derived from thrust::host_execution_policy to distinguish our custom execution policy: + * struct my_policy : thrust::host_execution_policy {}; + * + * // overload for_each on my_policy + * template + * Iterator for_each(my_policy, Iterator first, Iterator last, Function f) + * { + * std::cout << "Hello, world from for_each(my_policy)!" 
<< std::endl; + * + * for(; first < last; ++first) + * { + * f(*first); + * } + * + * return first; + * } + * + * struct ignore_argument + * { + * void operator()(int) {} + * }; + * + * int main() + * { + * int data[4]; + * + * // dispatch thrust::for_each using our custom policy: + * my_policy exec; + * thrust::for_each(exec, data, data + 4, ignore_argument()); + * + * // dispatch thrust::transform whose behavior our policy inherits + * thrust::transform(exec, data, data, + 4, data, thrust::identity()); + * + * return 0; + * } + * \endcode + * + * \see execution_policy + * \see device_execution_policy + */ +template + struct host_execution_policy + : thrust::system::__THRUST_HOST_SYSTEM_NAMESPACE::execution_policy +{}; + + +/*! \p device_execution_policy is the base class for all Thrust parallel execution policies + * which are derived from Thrust's default device backend system configured with the \p THRUST_DEVICE_SYSTEM + * macro. + * + * Custom user-defined backends which wish to inherit the functionality of Thrust's device backend system + * should derive a policy from this type in order to interoperate with Thrust algorithm dispatch. + * + * The following code snippet demonstrates how to derive a standalone custom execution policy from + * \p thrust::device_execution_policy to implement a backend which specializes \p for_each while inheriting + * the behavior of every other algorithm from the device system: + * + * \code + * #include + * #include + * + * // define a type derived from thrust::device_execution_policy to distinguish our custom execution policy: + * struct my_policy : thrust::device_execution_policy {}; + * + * // overload for_each on my_policy + * template + * Iterator for_each(my_policy, Iterator first, Iterator last, Function f) + * { + * std::cout << "Hello, world from for_each(my_policy)!" 
<< std::endl; + * + * for(; first < last; ++first) + * { + * f(*first); + * } + * + * return first; + * } + * + * struct ignore_argument + * { + * void operator()(int) {} + * }; + * + * int main() + * { + * int data[4]; + * + * // dispatch thrust::for_each using our custom policy: + * my_policy exec; + * thrust::for_each(exec, data, data + 4, ignore_argument()); + * + * // dispatch thrust::transform whose behavior our policy inherits + * thrust::transform(exec, data, data, + 4, data, thrust::identity()); + * + * return 0; + * } + * \endcode + * + * \see execution_policy + * \see host_execution_policy + */ +template + struct device_execution_policy + : thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::execution_policy +{}; + + +/*! \p thrust::host is the default parallel execution policy associated with Thrust's host backend system + * configured by the \p THRUST_HOST_SYSTEM macro. + * + * Instead of relying on implicit algorithm dispatch through iterator system tags, users may directly target + * algorithm dispatch at Thrust's host system by providing \p thrust::host as an algorithm parameter. + * + * Explicit dispatch can be useful in avoiding the introduction of data copies into containers such as + * \p thrust::host_vector. + * + * Note that even though \p thrust::host targets the host CPU, it is a parallel execution policy. That is, + * the order that an algorithm invokes functors or dereferences iterators is not defined. + * + * The type of \p thrust::host is implementation-defined. + * + * The following code snippet demonstrates how to use \p thrust::host to explicitly dispatch an invocation + * of \p thrust::for_each to the host backend system: + * + * \code + * #include + * #include + * #include + * + * struct printf_functor + * { + * __host__ __device__ + * void operator()(int x) + * { + * printf("%d\n", x); + * } + * }; + * ... 
+ * int vec[] = { 0, 1, 2 }; + * + * thrust::for_each(thrust::host, vec, vec + 3, printf_functor()); + * + * // 0 1 2 is printed to standard output in some unspecified order + * \endcode + * + * \see host_execution_policy + * \see thrust::device + */ +static const detail::host_t host; + + +/*! \p thrust::device is the default parallel execution policy associated with Thrust's device backend system + * configured by the \p THRUST_DEVICE_SYSTEM macro. + * + * Instead of relying on implicit algorithm dispatch through iterator system tags, users may directly target + * algorithm dispatch at Thrust's device system by providing \p thrust::device as an algorithm parameter. + * + * Explicit dispatch can be useful in avoiding the introduction of data copies into containers such as + * \p thrust::device_vector or to avoid wrapping e.g. raw pointers allocated by the CUDA API with types + * such as \p thrust::device_ptr. + * + * The user must take care to guarantee that the iterators provided to an algorithm are compatible with + * the device backend system. For example, raw pointers allocated by std::malloc typically + * cannot be dereferenced by a GPU. For this reason, raw pointers allocated by host APIs should not be mixed + * with a \p thrust::device algorithm invocation when the device backend is CUDA. + * + * The type of \p thrust::device is implementation-defined. + * + * The following code snippet demonstrates how to use \p thrust::device to explicitly dispatch an invocation + * of \p thrust::for_each to the device backend system: + * + * \code + * #include + * #include + * #include + * #include + * + * struct printf_functor + * { + * __host__ __device__ + * void operator()(int x) + * { + * printf("%d\n", x); + * } + * }; + * ... 
+ * thrust::device_vector vec(3); + * vec[0] = 0; vec[1] = 1; vec[2] = 2; + * + * thrust::for_each(thrust::device, vec.begin(), vec.end(), printf_functor()); + * + * // 0 1 2 is printed to standard output in some unspecified order + * \endcode + * + * \see host_execution_policy + * \see thrust::device + */ +THRUST_INLINE_CONSTANT detail::device_t device; + + +// define seq for the purpose of Doxygenating it +// it is actually defined elsewhere +#if 0 +/*! \p thrust::seq is an execution policy which requires an algorithm invocation to execute sequentially + * in the current thread. It can not be configured by a compile-time macro. + * + * The type of \p thrust::seq is implementation-defined. + * + * The following code snippet demonstrates how to use \p thrust::seq to explicitly execute an invocation + * of \p thrust::for_each sequentially: + * + * \code + * #include + * #include + * #include + * #include + * + * struct printf_functor + * { + * __host__ __device__ + * void operator()(int x) + * { + * printf("%d\n", x); + * } + * }; + * ... + * std::vector vec(3); + * vec[0] = 0; vec[1] = 1; vec[2] = 2; + * + * thrust::for_each(thrust::seq, vec.begin(), vec.end(), printf_functor()); + * + * // 0 1 2 is printed to standard output in sequential order + * \endcode + * + * \see thrust::host + * \see thrust::device + */ +static const detail::seq_t seq; +#endif + + +/*! 
\} + */ + + +THRUST_NAMESPACE_END diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/extrema.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/extrema.h new file mode 100644 index 0000000000000000000000000000000000000000..ca419a0aa5bd8282cadf9d13a4c7161bf0a781d6 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/extrema.h @@ -0,0 +1,801 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file extrema.h + * \brief Functions for computing computing extremal values + */ + +#pragma once + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! This version of \p min returns the smaller of two values, given a comparison operation. + * \param lhs The first value to compare. + * \param rhs The second value to compare. + * \param comp A comparison operation. + * \return The smaller element. + * + * \tparam T is convertible to \p BinaryPredicate's first argument type and to its second argument type. + * \tparam BinaryPredicate is a model of BinaryPredicate. + * + * The following code snippet demonstrates how to use \p min to compute the smaller of two + * key-value objects. + * + * \code + * #include + * ... 
+ * struct key_value
+ * {
+ * int key;
+ * int value;
+ * };
+ *
+ * struct compare_key_value
+ * {
+ * __host__ __device__
+ * bool operator()(key_value lhs, key_value rhs)
+ * {
+ * return lhs.key < rhs.key;
+ * }
+ * };
+ *
+ * ...
+ * key_value a = {13, 0};
+ * key_value b = { 7, 1};
+ *
+ * key_value smaller = thrust::min(a, b, compare_key_value());
+ *
+ * // smaller is {7, 1}
+ * \endcode
+ *
+ * \note Returns the first argument when the arguments are equivalent.
+ * \see max
+ */
+template <typename T, typename BinaryPredicate>
+__host__ __device__
+ T min THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs, BinaryPredicate comp);
+
+
+/*! This version of \p min returns the smaller of two values.
+ * \param lhs The first value to compare.
+ * \param rhs The second value to compare.
+ * \return The smaller element.
+ *
+ * \tparam T is a model of LessThan Comparable.
+ *
+ * The following code snippet demonstrates how to use \p min to compute the smaller of two
+ * integers.
+ *
+ * \code
+ * #include <thrust/extrema.h>
+ * ...
+ * int a = 13;
+ * int b = 7;
+ *
+ * int smaller = thrust::min(a, b);
+ *
+ * // smaller is 7
+ * \endcode
+ *
+ * \note Returns the first argument when the arguments are equivalent.
+ * \see max
+ */
+template <typename T>
+__host__ __device__
+ T min THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs);
+
+
+/*! This version of \p max returns the larger of two values, given a comparison operation.
+ * \param lhs The first value to compare.
+ * \param rhs The second value to compare.
+ * \param comp A comparison operation.
+ * \return The larger element.
+ *
+ * \tparam T is convertible to \p BinaryPredicate's first argument type and to its second argument type.
+ * \tparam BinaryPredicate is a model of BinaryPredicate.
+ *
+ * The following code snippet demonstrates how to use \p max to compute the larger of two
+ * key-value objects.
+ *
+ * \code
+ * #include <thrust/extrema.h>
+ * ...
+ * struct key_value
+ * {
+ * int key;
+ * int value;
+ * };
+ *
+ * struct compare_key_value
+ * {
+ * __host__ __device__
+ * bool operator()(key_value lhs, key_value rhs)
+ * {
+ * return lhs.key < rhs.key;
+ * }
+ * };
+ *
+ * ...
+ * key_value a = {13, 0};
+ * key_value b = { 7, 1};
+ *
+ * key_value larger = thrust::max(a, b, compare_key_value());
+ *
+ * // larger is {13, 0}
+ * \endcode
+ *
+ * \note Returns the first argument when the arguments are equivalent.
+ * \see min
+ */
+template <typename T, typename BinaryPredicate>
+__host__ __device__
+ T max THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs, BinaryPredicate comp);
+
+
+/*! This version of \p max returns the larger of two values.
+ * \param lhs The first value to compare.
+ * \param rhs The second value to compare.
+ * \return The larger element.
+ *
+ * \tparam T is a model of LessThan Comparable.
+ *
+ * The following code snippet demonstrates how to use \p max to compute the larger of two
+ * integers.
+ *
+ * \code
+ * #include <thrust/extrema.h>
+ * ...
+ * int a = 13;
+ * int b = 7;
+ *
+ * int larger = thrust::max(a, b);
+ *
+ * // larger is 13
+ * \endcode
+ *
+ * \note Returns the first argument when the arguments are equivalent.
+ * \see min
+ */
+template <typename T>
+__host__ __device__
+ T max THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs);
+
+
+/*! \addtogroup reductions
+ * \{
+ * \addtogroup extrema
+ * \ingroup reductions
+ * \{
+ */
+
+/*! \p min_element finds the smallest element in the range [first, last).
+ * It returns the first iterator \c i in [first, last)
+ * such that no other iterator in [first, last) points to a value smaller
+ * than \c *i. The return value is \p last if and only if [first, last) is an
+ * empty range.
+ *
+ * The two versions of \p min_element differ in how they define whether one element is
+ * less than another. This version compares objects using \c operator<.
Specifically, + * this version of \p min_element returns the first iterator \c i in [first, last) + * such that, for every iterator \c j in [first, last), *j < *i is + * \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \return An iterator pointing to the smallest element of the range [first, last), + * if it is not an empty range; \p last, otherwise. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \c ForwardIterator's \c value_type is a model of + * LessThan Comparable. + * + * \code + * #include + * #include + * ... + * int data[6] = {1, 0, 2, 2, 1, 3}; + * int *result = thrust::min_element(thrust::host, data, data + 6); + * + * // result is data + 1 + * // *result is 0 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/min_element + */ +template +__host__ __device__ +ForwardIterator min_element(const thrust::detail::execution_policy_base &exec, ForwardIterator first, ForwardIterator last); + + +/*! \p min_element finds the smallest element in the range [first, last). + * It returns the first iterator \c i in [first, last) + * such that no other iterator in [first, last) points to a value smaller + * than \c *i. The return value is \p last if and only if [first, last) is an + * empty range. + * + * The two versions of \p min_element differ in how they define whether one element is + * less than another. This version compares objects using \c operator<. Specifically, + * this version of \p min_element returns the first iterator \c i in [first, last) + * such that, for every iterator \c j in [first, last), *j < *i is + * \c false. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. 
+ * \return An iterator pointing to the smallest element of the range [first, last), + * if it is not an empty range; \p last, otherwise. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \c ForwardIterator's \c value_type is a model of + * LessThan Comparable. + * + * \code + * #include + * ... + * int data[6] = {1, 0, 2, 2, 1, 3}; + * int *result = thrust::min_element(data, data + 6); + * + * // result is data + 1 + * // *result is 0 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/min_element + */ +template +ForwardIterator min_element(ForwardIterator first, ForwardIterator last); + + +/*! \p min_element finds the smallest element in the range [first, last). + * It returns the first iterator \c i in [first, last) + * such that no other iterator in [first, last) points to a value smaller + * than \c *i. The return value is \p last if and only if [first, last) is an + * empty range. + * + * The two versions of \p min_element differ in how they define whether one element is + * less than another. This version compares objects using a function object \p comp. + * Specifically, this version of \p min_element returns the first iterator \c i in [first, last) + * such that, for every iterator \c j in [first, last), comp(*j, *i) is + * \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param comp A binary predicate used for comparison. + * \return An iterator pointing to the smallest element of the range [first, last), + * if it is not an empty range; \p last, otherwise. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to both \p comp's + * \c first_argument_type and \c second_argument_type. 
+ * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p min_element to find the smallest element + * of a collection of key-value pairs using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * + * struct key_value + * { + * int key; + * int value; + * }; + * + * struct compare_key_value + * { + * __host__ __device__ + * bool operator()(key_value lhs, key_value rhs) + * { + * return lhs.key < rhs.key; + * } + * }; + * + * ... + * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} }; + * + * key_value *smallest = thrust::min_element(thrust::host, data, data + 4, compare_key_value()); + * + * // smallest == data + 1 + * // *smallest == {0,7} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/min_element + */ +template +__host__ __device__ +ForwardIterator min_element(const thrust::detail::execution_policy_base &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp); + + +/*! \p min_element finds the smallest element in the range [first, last). + * It returns the first iterator \c i in [first, last) + * such that no other iterator in [first, last) points to a value smaller + * than \c *i. The return value is \p last if and only if [first, last) is an + * empty range. + * + * The two versions of \p min_element differ in how they define whether one element is + * less than another. This version compares objects using a function object \p comp. + * Specifically, this version of \p min_element returns the first iterator \c i in [first, last) + * such that, for every iterator \c j in [first, last), comp(*j, *i) is + * \c false. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param comp A binary predicate used for comparison. + * \return An iterator pointing to the smallest element of the range [first, last), + * if it is not an empty range; \p last, otherwise. 
+ * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to both \p comp's + * \c first_argument_type and \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p min_element to find the smallest element + * of a collection of key-value pairs. + * + * \code + * #include + * + * struct key_value + * { + * int key; + * int value; + * }; + * + * struct compare_key_value + * { + * __host__ __device__ + * bool operator()(key_value lhs, key_value rhs) + * { + * return lhs.key < rhs.key; + * } + * }; + * + * ... + * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} }; + * + * key_value *smallest = thrust::min_element(data, data + 4, compare_key_value()); + * + * // smallest == data + 1 + * // *smallest == {0,7} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/min_element + */ +template +ForwardIterator min_element(ForwardIterator first, ForwardIterator last, + BinaryPredicate comp); + + +/*! \p max_element finds the largest element in the range [first, last). + * It returns the first iterator \c i in [first, last) + * such that no other iterator in [first, last) points to a value larger + * than \c *i. The return value is \p last if and only if [first, last) is an + * empty range. + * + * The two versions of \p max_element differ in how they define whether one element is + * greater than another. This version compares objects using \c operator<. Specifically, + * this version of \p max_element returns the first iterator \c i in [first, last) + * such that, for every iterator \c j in [first, last), *i < *j is + * \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. 
+ * \return An iterator pointing to the largest element of the range [first, last), + * if it is not an empty range; \p last, otherwise. + * + * \tparam A Thrust backend system. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \c ForwardIterator's \c value_type is a model of + * LessThan Comparable. + * + * \code + * #include + * #include + * ... + * int data[6] = {1, 0, 2, 2, 1, 3}; + * int *result = thrust::max_element(thrust::host, data, data + 6); + * + * // *result == 3 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/max_element + */ +template +__host__ __device__ +ForwardIterator max_element(const thrust::detail::execution_policy_base &exec, ForwardIterator first, ForwardIterator last); + + +/*! \p max_element finds the largest element in the range [first, last). + * It returns the first iterator \c i in [first, last) + * such that no other iterator in [first, last) points to a value larger + * than \c *i. The return value is \p last if and only if [first, last) is an + * empty range. + * + * The two versions of \p max_element differ in how they define whether one element is + * greater than another. This version compares objects using \c operator<. Specifically, + * this version of \p max_element returns the first iterator \c i in [first, last) + * such that, for every iterator \c j in [first, last), *i < *j is + * \c false. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \return An iterator pointing to the largest element of the range [first, last), + * if it is not an empty range; \p last, otherwise. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \c ForwardIterator's \c value_type is a model of + * LessThan Comparable. + * + * \code + * #include + * ... 
+ * int data[6] = {1, 0, 2, 2, 1, 3}; + * int *result = thrust::max_element(data, data + 6); + * + * // *result == 3 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/max_element + */ +template +ForwardIterator max_element(ForwardIterator first, ForwardIterator last); + + +/*! \p max_element finds the largest element in the range [first, last). + * It returns the first iterator \c i in [first, last) + * such that no other iterator in [first, last) points to a value larger + * than \c *i. The return value is \p last if and only if [first, last) is an + * empty range. + * + * The two versions of \p max_element differ in how they define whether one element is + * less than another. This version compares objects using a function object \p comp. + * Specifically, this version of \p max_element returns the first iterator \c i in [first, last) + * such that, for every iterator \c j in [first, last), comp(*i, *j) is + * \c false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param comp A binary predicate used for comparison. + * \return An iterator pointing to the largest element of the range [first, last), + * if it is not an empty range; \p last, otherwise. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to both \p comp's + * \c first_argument_type and \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p max_element to find the largest element + * of a collection of key-value pairs using the \p thrust::host execution policy for parallelization. + * + * \code + * #include + * #include + * ... 
+ * + * struct key_value + * { + * int key; + * int value; + * }; + * + * struct compare_key_value + * { + * __host__ __device__ + * bool operator()(key_value lhs, key_value rhs) + * { + * return lhs.key < rhs.key; + * } + * }; + * + * ... + * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} }; + * + * key_value *largest = thrust::max_element(thrust::host, data, data + 4, compare_key_value()); + * + * // largest == data + 3 + * // *largest == {6,1} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/max_element + */ +template +__host__ __device__ +ForwardIterator max_element(const thrust::detail::execution_policy_base &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp); + + +/*! \p max_element finds the largest element in the range [first, last). + * It returns the first iterator \c i in [first, last) + * such that no other iterator in [first, last) points to a value larger + * than \c *i. The return value is \p last if and only if [first, last) is an + * empty range. + * + * The two versions of \p max_element differ in how they define whether one element is + * less than another. This version compares objects using a function object \p comp. + * Specifically, this version of \p max_element returns the first iterator \c i in [first, last) + * such that, for every iterator \c j in [first, last), comp(*i, *j) is + * \c false. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param comp A binary predicate used for comparison. + * \return An iterator pointing to the largest element of the range [first, last), + * if it is not an empty range; \p last, otherwise. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to both \p comp's + * \c first_argument_type and \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. 
+ * + * The following code snippet demonstrates how to use \p max_element to find the largest element + * of a collection of key-value pairs. + * + * \code + * #include + * + * struct key_value + * { + * int key; + * int value; + * }; + * + * struct compare_key_value + * { + * __host__ __device__ + * bool operator()(key_value lhs, key_value rhs) + * { + * return lhs.key < rhs.key; + * } + * }; + * + * ... + * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} }; + * + * key_value *largest = thrust::max_element(data, data + 4, compare_key_value()); + * + * // largest == data + 3 + * // *largest == {6,1} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/max_element + */ +template +ForwardIterator max_element(ForwardIterator first, ForwardIterator last, + BinaryPredicate comp); + + +/*! \p minmax_element finds the smallest and largest elements in the range [first, last). + * It returns a pair of iterators (imin, imax) where \c imin is the same iterator + * returned by \p min_element and \c imax is the same iterator returned by \p max_element. + * This function is potentially more efficient than separate calls to \p min_element and \p max_element. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \return A pair of iterator pointing to the smallest and largest elements of the range [first, last), + * if it is not an empty range; \p last, otherwise. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \c ForwardIterator's \c value_type is a model of + * LessThan Comparable. + * + * \code + * #include + * #include + * ... 
+ * int data[6] = {1, 0, 2, 2, 1, 3}; + * thrust::pair result = thrust::minmax_element(thrust::host, data, data + 6); + * + * // result.first is data + 1 + * // result.second is data + 5 + * // *result.first is 0 + * // *result.second is 3 + * \endcode + * + * \see min_element + * \see max_element + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf + */ +template +__host__ __device__ +thrust::pair minmax_element(const thrust::detail::execution_policy_base &exec, ForwardIterator first, ForwardIterator last); + + +/*! \p minmax_element finds the smallest and largest elements in the range [first, last). + * It returns a pair of iterators (imin, imax) where \c imin is the same iterator + * returned by \p min_element and \c imax is the same iterator returned by \p max_element. + * This function is potentially more efficient than separate calls to \p min_element and \p max_element. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \return A pair of iterator pointing to the smallest and largest elements of the range [first, last), + * if it is not an empty range; \p last, otherwise. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \c ForwardIterator's \c value_type is a model of + * LessThan Comparable. + * + * \code + * #include + * ... + * int data[6] = {1, 0, 2, 2, 1, 3}; + * thrust::pair result = thrust::minmax_element(data, data + 6); + * + * // result.first is data + 1 + * // result.second is data + 5 + * // *result.first is 0 + * // *result.second is 3 + * \endcode + * + * \see min_element + * \see max_element + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf + */ +template +thrust::pair minmax_element(ForwardIterator first, + ForwardIterator last); + + +/*! \p minmax_element finds the smallest and largest elements in the range [first, last). 
+ * It returns a pair of iterators (imin, imax) where \c imin is the same iterator + * returned by \p min_element and \c imax is the same iterator returned by \p max_element. + * This function is potentially more efficient than separate calls to \p min_element and \p max_element. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param comp A binary predicate used for comparison. + * \return A pair of iterator pointing to the smallest and largest elements of the range [first, last), + * if it is not an empty range; \p last, otherwise. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to both \p comp's + * \c first_argument_type and \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p minmax_element to find the smallest and largest elements + * of a collection of key-value pairs using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * + * struct key_value + * { + * int key; + * int value; + * }; + * + * struct compare_key_value + * { + * __host__ __device__ + * bool operator()(key_value lhs, key_value rhs) + * { + * return lhs.key < rhs.key; + * } + * }; + * + * ... 
+ * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} }; + * + * thrust::pair extrema = thrust::minmax_element(thrust::host, data, data + 4, compare_key_value()); + * + * // extrema.first == data + 1 + * // *extrema.first == {0,7} + * // extrema.second == data + 3 + * // *extrema.second == {6,1} + * \endcode + * + * \see min_element + * \see max_element + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf + */ +template +__host__ __device__ +thrust::pair minmax_element(const thrust::detail::execution_policy_base &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp); + + +/*! \p minmax_element finds the smallest and largest elements in the range [first, last). + * It returns a pair of iterators (imin, imax) where \c imin is the same iterator + * returned by \p min_element and \c imax is the same iterator returned by \p max_element. + * This function is potentially more efficient than separate calls to \p min_element and \p max_element. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param comp A binary predicate used for comparison. + * \return A pair of iterator pointing to the smallest and largest elements of the range [first, last), + * if it is not an empty range; \p last, otherwise. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to both \p comp's + * \c first_argument_type and \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p minmax_element to find the smallest and largest elements + * of a collection of key-value pairs. + * + * \code + * #include + * #include + * + * struct key_value + * { + * int key; + * int value; + * }; + * + * struct compare_key_value + * { + * __host__ __device__ + * bool operator()(key_value lhs, key_value rhs) + * { + * return lhs.key < rhs.key; + * } + * }; + * + * ... 
+ * key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} }; + * + * thrust::pair extrema = thrust::minmax_element(data, data + 4, compare_key_value()); + * + * // extrema.first == data + 1 + * // *extrema.first == {0,7} + * // extrema.second == data + 3 + * // *extrema.second == {6,1} + * \endcode + * + * \see min_element + * \see max_element + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf + */ +template +thrust::pair minmax_element(ForwardIterator first, + ForwardIterator last, + BinaryPredicate comp); + +/*! \} // end extrema + * \} // end reductions + */ + +THRUST_NAMESPACE_END + +#include +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/fill.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/fill.h new file mode 100644 index 0000000000000000000000000000000000000000..bd9e40268972d7d03f7860ad8771d54f0c8a29e7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/fill.h @@ -0,0 +1,206 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file fill.h + * \brief Fills a range with a constant value + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! 
\addtogroup transformations + * \addtogroup filling + * \ingroup transformations + * \{ + */ + + +/*! \p fill assigns the value \p value to every element in + * the range [first, last). That is, for every + * iterator \c i in [first, last), it performs + * the assignment *i = value. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param value The value to be copied. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable. + * \tparam T is a model of Assignable, + * and \p T's \c value_type is convertible to \p ForwardIterator's \c value_type. + * + * The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's + * elements to a given value using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector v(4); + * thrust::fill(thrust::device, v.begin(), v.end(), 137); + * + * // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/fill + * \see \c fill_n + * \see \c uninitialized_fill + */ +template +__host__ __device__ + void fill(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + const T &value); + + +/*! \p fill assigns the value \p value to every element in + * the range [first, last). That is, for every + * iterator \c i in [first, last), it performs + * the assignment *i = value. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param value The value to be copied. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable. 
+ * \tparam T is a model of Assignable, + * and \p T's \c value_type is convertible to \p ForwardIterator's \c value_type. + * + * The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's + * elements to a given value. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(4); + * thrust::fill(v.begin(), v.end(), 137); + * + * // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/fill + * \see \c fill_n + * \see \c uninitialized_fill + */ +template +__host__ __device__ + void fill(ForwardIterator first, + ForwardIterator last, + const T &value); + + +/*! \p fill_n assigns the value \p value to every element in + * the range [first, first+n). That is, for every + * iterator \c i in [first, first+n), it performs + * the assignment *i = value. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param n The size of the sequence. + * \param value The value to be copied. + * \return first + n + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam T is a model of Assignable, + * and \p T's \c value_type is convertible to a type in \p OutputIterator's set of \c value_type. + * + * The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's + * elements to a given value using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * thrust::device_vector v(4); + * thrust::fill_n(thrust::device, v.begin(), v.size(), 137); + * + * // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/fill_n + * \see \c fill + * \see \c uninitialized_fill_n + */ +template +__host__ __device__ + OutputIterator fill_n(const thrust::detail::execution_policy_base &exec, + OutputIterator first, + Size n, + const T &value); + + +/*! \p fill_n assigns the value \p value to every element in + * the range [first, first+n). That is, for every + * iterator \c i in [first, first+n), it performs + * the assignment *i = value. + * + * \param first The beginning of the sequence. + * \param n The size of the sequence. + * \param value The value to be copied. + * \return first + n + * + * \tparam OutputIterator is a model of Output Iterator. + * \tparam T is a model of Assignable, + * and \p T's \c value_type is convertible to a type in \p OutputIterator's set of \c value_type. + * + * The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's + * elements to a given value. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v(4); + * thrust::fill_n(v.begin(), v.size(), 137); + * + * // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/fill_n + * \see \c fill + * \see \c uninitialized_fill_n + */ +template +__host__ __device__ + OutputIterator fill_n(OutputIterator first, + Size n, + const T &value); + + +/*! 
\} // end filling + * \} // transformations + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/for_each.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/for_each.h new file mode 100644 index 0000000000000000000000000000000000000000..7d05e3ea16bd5949c0bede7d822e59d0940dcf5d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/for_each.h @@ -0,0 +1,278 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file thrust/for_each.h + * \brief Applies a function to each element in a range + */ + +#pragma once + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup modifying + * \ingroup transformations + * \{ + */ + + +/*! \p for_each applies the function object \p f to each element + * in the range [first, last); \p f's return value, if any, + * is ignored. Unlike the C++ Standard Template Library function + * std::for_each, this version offers no guarantee on + * order of execution. For this reason, this version of \p for_each + * does not return a copy of the function object. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. 
+ * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param f The function object to apply to the range [first, last). + * \return last + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type. + * \tparam UnaryFunction is a model of Unary Function, + * and \p UnaryFunction does not apply any non-constant operation through its argument. + * + * The following code snippet demonstrates how to use \p for_each to print the elements + * of a \p thrust::device_vector using the \p thrust::device parallelization policy: + * + * \code + * #include + * #include + * #include + * #include + * ... + * + * struct printf_functor + * { + * __host__ __device__ + * void operator()(int x) + * { + * // note that using printf in a __device__ function requires + * // code compiled for a GPU with compute capability 2.0 or + * // higher (nvcc --arch=sm_20) + * printf("%d\n", x); + * } + * }; + * ... + * thrust::device_vector d_vec(3); + * d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2; + * + * thrust::for_each(thrust::device, d_vec.begin(), d_vec.end(), printf_functor()); + * + * // 0 1 2 is printed to standard output in some unspecified order + * \endcode + * + * \see for_each_n + * \see https://en.cppreference.com/w/cpp/algorithm/for_each + */ +template +__host__ __device__ +InputIterator for_each(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + UnaryFunction f); + + +/*! \p for_each_n applies the function object \p f to each element + * in the range [first, first + n); \p f's return value, if any, + * is ignored. Unlike the C++ Standard Template Library function + * std::for_each, this version offers no guarantee on + * order of execution. + * + * The algorithm's execution is parallelized as determined by \p exec. 
+ * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param n The size of the input sequence. + * \param f The function object to apply to the range [first, first + n). + * \return first + n + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type. + * \tparam Size is an integral type. + * \tparam UnaryFunction is a model of Unary Function, + * and \p UnaryFunction does not apply any non-constant operation through its argument. + * + * The following code snippet demonstrates how to use \p for_each_n to print the elements + * of a \p device_vector using the \p thrust::device parallelization policy. + * + * \code + * #include + * #include + * #include + * #include + * + * struct printf_functor + * { + * __host__ __device__ + * void operator()(int x) + * { + * // note that using printf in a __device__ function requires + * // code compiled for a GPU with compute capability 2.0 or + * // higher (nvcc --arch=sm_20) + * printf("%d\n", x); + * } + * }; + * ... + * thrust::device_vector d_vec(3); + * d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2; + * + * thrust::for_each_n(thrust::device, d_vec.begin(), d_vec.size(), printf_functor()); + * + * // 0 1 2 is printed to standard output in some unspecified order + * \endcode + * + * \see for_each + * \see https://en.cppreference.com/w/cpp/algorithm/for_each + */ +template +__host__ __device__ +InputIterator for_each_n(const thrust::detail::execution_policy_base &exec, + InputIterator first, + Size n, + UnaryFunction f); + +/*! \p for_each applies the function object \p f to each element + * in the range [first, last); \p f's return value, if any, + * is ignored. Unlike the C++ Standard Template Library function + * std::for_each, this version offers no guarantee on + * order of execution. 
For this reason, this version of \p for_each + * does not return a copy of the function object. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param f The function object to apply to the range [first, last). + * \return last + * + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type. + * \tparam UnaryFunction is a model of Unary Function, + * and \p UnaryFunction does not apply any non-constant operation through its argument. + * + * The following code snippet demonstrates how to use \p for_each to print the elements + * of a \p device_vector. + * + * \code + * #include + * #include + * #include + * + * struct printf_functor + * { + * __host__ __device__ + * void operator()(int x) + * { + * // note that using printf in a __device__ function requires + * // code compiled for a GPU with compute capability 2.0 or + * // higher (nvcc --arch=sm_20) + * printf("%d\n", x); + * } + * }; + * ... + * thrust::device_vector d_vec(3); + * d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2; + * + * thrust::for_each(d_vec.begin(), d_vec.end(), printf_functor()); + * + * // 0 1 2 is printed to standard output in some unspecified order + * \endcode + * + * \see for_each_n + * \see https://en.cppreference.com/w/cpp/algorithm/for_each + */ +template +InputIterator for_each(InputIterator first, + InputIterator last, + UnaryFunction f); + + +/*! \p for_each_n applies the function object \p f to each element + * in the range [first, first + n); \p f's return value, if any, + * is ignored. Unlike the C++ Standard Template Library function + * std::for_each, this version offers no guarantee on + * order of execution. + * + * \param first The beginning of the sequence. + * \param n The size of the input sequence. + * \param f The function object to apply to the range [first, first + n). 
+ * \return first + n + * + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type. + * \tparam Size is an integral type. + * \tparam UnaryFunction is a model of Unary Function, + * and \p UnaryFunction does not apply any non-constant operation through its argument. + * + * The following code snippet demonstrates how to use \p for_each_n to print the elements + * of a \p device_vector. + * + * \code + * #include + * #include + * #include + * + * struct printf_functor + * { + * __host__ __device__ + * void operator()(int x) + * { + * // note that using printf in a __device__ function requires + * // code compiled for a GPU with compute capability 2.0 or + * // higher (nvcc --arch=sm_20) + * printf("%d\n", x); + * } + * }; + * ... + * thrust::device_vector d_vec(3); + * d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2; + * + * thrust::for_each_n(d_vec.begin(), d_vec.size(), printf_functor()); + * + * // 0 1 2 is printed to standard output in some unspecified order + * \endcode + * + * \see for_each + * \see https://en.cppreference.com/w/cpp/algorithm/for_each + */ +template +InputIterator for_each_n(InputIterator first, + Size n, + UnaryFunction f); + +/*! 
\} // end modifying + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/future.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/future.h new file mode 100644 index 0000000000000000000000000000000000000000..d8fb7544b1777c2b52178ebdd8b829b29b5fc8b1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/future.h @@ -0,0 +1,176 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file thrust/future.h + * \brief `thrust::future`, an asynchronous value type. + */ + +#pragma once + +#include +#include + +#if THRUST_CPP_DIALECT >= 2014 + +#include +#include + +#include + +/* +// #include the host system's pointer.h header. +#define __THRUST_HOST_SYSTEM_POINTER_HEADER <__THRUST_HOST_SYSTEM_ROOT/pointer.h> + #include __THRUST_HOST_SYSTEM_POINTER_HEADER +#undef __THRUST_HOST_SYSTEM_POINTER_HEADER +*/ + +// #include the device system's pointer.h header. +#define __THRUST_DEVICE_SYSTEM_POINTER_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/pointer.h> + #include __THRUST_DEVICE_SYSTEM_POINTER_HEADER +#undef __THRUST_DEVICE_SYSTEM_POINTER_HEADER + +/* +// #include the host system's future.h header. 
+#define __THRUST_HOST_SYSTEM_FUTURE_HEADER <__THRUST_HOST_SYSTEM_ROOT/future.h> + #include __THRUST_HOST_SYSTEM_FUTURE_HEADER +#undef __THRUST_HOST_SYSTEM_FUTURE_HEADER +*/ + +// #include the device system's future.h header. +#define __THRUST_DEVICE_SYSTEM_FUTURE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/future.h> + #include __THRUST_DEVICE_SYSTEM_FUTURE_HEADER +#undef __THRUST_DEVICE_SYSTEM_FUTURE_HEADER + +THRUST_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// + +// `select_unique_(future|event)_type` is a hook for choosing the +// `unique_eager_event`/`unique_eager_future` type for a system. `decltype` is +// used to determine the return type of an ADL call to +// `select_unique_eager_(future|event)_type(system)`; that return type should +// be the correct event/future type for `system`. Overloads should only be +// declared, not defined. + +namespace unimplemented +{ + +struct no_unique_eager_event_type_found {}; + +inline __host__ +no_unique_eager_event_type_found +unique_eager_event_type(...) noexcept; + +struct no_unique_eager_future_type_found {}; + +template +__host__ +no_unique_eager_future_type_found +unique_eager_future_type(...) 
noexcept; + +} // namespace unimplemented + +namespace unique_eager_event_type_detail +{ + +using unimplemented::unique_eager_event_type; + +template +using select = decltype( + unique_eager_event_type(std::declval()) +); + +} // namespace unique_eager_event_type_detail + +namespace unique_eager_future_type_detail +{ + +using unimplemented::unique_eager_future_type; + +template +using select = decltype( + unique_eager_future_type(std::declval()) +); + +} // namespace unique_eager_future_type_detail + +/////////////////////////////////////////////////////////////////////////////// + +template +using unique_eager_event = unique_eager_event_type_detail::select; + +template +using event = unique_eager_event; + +/////////////////////////////////////////////////////////////////////////////// + +template +using unique_eager_future = unique_eager_future_type_detail::select; + +template +using future = unique_eager_future; + +/* +/////////////////////////////////////////////////////////////////////////////// + +using host_unique_eager_event = unique_eager_event_type_detail::select< + thrust::system::__THRUST_HOST_SYSTEM_NAMESPACE::tag +>; +using host_event = host_unique_eager_event; + +/////////////////////////////////////////////////////////////////////////////// + +template +using host_unique_eager_future = unique_eager_future_type_detail::select< + thrust::system::__THRUST_HOST_SYSTEM_NAMESPACE::tag, T +>; +template +using host_future = host_unique_eager_future; +*/ + +/////////////////////////////////////////////////////////////////////////////// + +using device_unique_eager_event = unique_eager_event_type_detail::select< + thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::tag +>; + +using device_event = device_unique_eager_event; + +/////////////////////////////////////////////////////////////////////////////// + +template +using device_unique_eager_future = unique_eager_future_type_detail::select< + thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::tag, T +>; + 
+template +using device_future = device_unique_eager_future; + +/////////////////////////////////////////////////////////////////////////////// + +struct new_stream_t final {}; + +THRUST_INLINE_CONSTANT new_stream_t new_stream{}; + +/////////////////////////////////////////////////////////////////////////////// + +using thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::when_all; + +/////////////////////////////////////////////////////////////////////////////// + +THRUST_NAMESPACE_END + +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/generate.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/generate.h new file mode 100644 index 0000000000000000000000000000000000000000..d47295344607d3112089cd1ed775d16fa4b81adf --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/generate.h @@ -0,0 +1,211 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file generate.h + * \brief Fills a range with values "generated" from a function of no arguments + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup transformations + * \{ + */ + + +/*! 
\p generate assigns the result of invoking \p gen, a function object that takes no arguments, + * to each element in the range [first,last). + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The first element in the range of interest. + * \param last The last element in the range of interest. + * \param gen A function argument, taking no parameters, used to generate values to assign to + * elements in the range [first,last). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable. + * \tparam Generator is a model of Generator, + * and \p Generator's \c result_type is convertible to \p ForwardIterator's \c value_type. + * + * The following code snippet demonstrates how to fill a \c host_vector with random numbers, + * using the standard C library function \c rand using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * #include + * ... + * thrust::host_vector v(10); + * srand(13); + * thrust::generate(thrust::host, v.begin(), v.end(), rand); + * + * // the elements of v are now pseudo-random numbers + * \endcode + * + * \see generate_n + * \see https://en.cppreference.com/w/cpp/algorithm/generate + */ +template +__host__ __device__ + void generate(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + Generator gen); + + +/*! \p generate assigns the result of invoking \p gen, a function object that takes no arguments, + * to each element in the range [first,last). + * + * \param first The first element in the range of interest. + * \param last The last element in the range of interest. + * \param gen A function argument, taking no parameters, used to generate values to assign to + * elements in the range [first,last). 
+ * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable. + * \tparam Generator is a model of Generator, + * and \p Generator's \c result_type is convertible to \p ForwardIterator's \c value_type. + * + * The following code snippet demonstrates how to fill a \c host_vector with random numbers, + * using the standard C library function \c rand. + * + * \code + * #include + * #include + * #include + * #include + * ... + * thrust::host_vector v(10); + * srand(13); + * thrust::generate(v.begin(), v.end(), rand); + * + * // the elements of v are now pseudo-random numbers + * \endcode + * + * \see generate_n + * \see https://en.cppreference.com/w/cpp/algorithm/generate + */ +template + void generate(ForwardIterator first, + ForwardIterator last, + Generator gen); + + +/*! \p generate_n assigns the result of invoking \p gen, a function object that takes no arguments, + * to each element in the range [first,first + n). The return value is first + n. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The first element in the range of interest. + * \param n The size of the range of interest. + * \param gen A function argument, taking no parameters, used to generate values to assign to + * elements in the range [first,first + n). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam Size is an integral type (either signed or unsigned). + * \tparam Generator is a model of Generator, + * and \p Generator's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types. 
+ * + * The following code snippet demonstrates how to fill a \c host_vector with random numbers, + * using the standard C library function \c rand using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * #include + * ... + * thrust::host_vector v(10); + * srand(13); + * thrust::generate_n(thrust::host, v.begin(), 10, rand); + * + * // the elements of v are now pseudo-random numbers + * \endcode + * + * \see generate + * \see https://en.cppreference.com/w/cpp/algorithm/generate + */ +template +__host__ __device__ + OutputIterator generate_n(const thrust::detail::execution_policy_base &exec, + OutputIterator first, + Size n, + Generator gen); + + +/*! \p generate_n assigns the result of invoking \p gen, a function object that takes no arguments, + * to each element in the range [first,first + n). The return value is first + n. + * + * \param first The first element in the range of interest. + * \param n The size of the range of interest. + * \param gen A function argument, taking no parameters, used to generate values to assign to + * elements in the range [first,first + n). + * + * \tparam OutputIterator is a model of Output Iterator. + * \tparam Size is an integral type (either signed or unsigned). + * \tparam Generator is a model of Generator, + * and \p Generator's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types. + * + * The following code snippet demonstrates how to fill a \c host_vector with random numbers, + * using the standard C library function \c rand. + * + * \code + * #include + * #include + * #include + * ... + * thrust::host_vector v(10); + * srand(13); + * thrust::generate_n(v.begin(), 10, rand); + * + * // the elements of v are now pseudo-random numbers + * \endcode + * + * \see generate + * \see https://en.cppreference.com/w/cpp/algorithm/generate + */ +template + OutputIterator generate_n(OutputIterator first, + Size n, + Generator gen); + + +/*! 
\} // end transformations + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/inner_product.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/inner_product.h new file mode 100644 index 0000000000000000000000000000000000000000..80068cf0c834a147f8a754f92bc1322ecbb57b9b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/inner_product.h @@ -0,0 +1,262 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file inner_product.h + * \brief Mathematical inner product between ranges + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup reductions + * \{ + * \addtogroup transformed_reductions Transformed Reductions + * \ingroup reductions + * \{ + */ + + +/*! \p inner_product calculates an inner product of the ranges + * [first1, last1) and [first2, first2 + (last1 - first1)). + * + * Specifically, this version of \p inner_product computes the sum + * init + (*first1 * *first2) + (*(first1+1) * *(first2+1)) + ... + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first sequence. 
+ * \param last1 The end of the first sequence. + * \param first2 The beginning of the second sequence. + * \param init Initial value of the result. + * \return The inner product of sequences [first1, last1) + * and [first2, last2) plus \p init. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputType is a model of Assignable, + * and if \c x is an object of type \p OutputType, and \c y is an object of \p InputIterator1's \c value_type, + * and \c z is an object of \p InputIterator2's \c value_type, then x + y * z is defined + * and is convertible to \p OutputType. + * + * The following code demonstrates how to use \p inner_product to + * compute the dot product of two vectors using the \p thrust::host execution policy for parallelization. + * + * \code + * #include + * #include + * ... + * float vec1[3] = {1.0f, 2.0f, 5.0f}; + * float vec2[3] = {4.0f, 1.0f, 5.0f}; + * + * float result = thrust::inner_product(thrust::host, vec1, vec1 + 3, vec2, 0.0f); + * + * // result == 31.0f + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/inner_product + */ +template +__host__ __device__ +OutputType inner_product(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputType init); + + +/*! \p inner_product calculates an inner product of the ranges + * [first1, last1) and [first2, first2 + (last1 - first1)). + * + * Specifically, this version of \p inner_product computes the sum + * init + (*first1 * *first2) + (*(first1+1) * *(first2+1)) + ... + * + * Unlike the C++ Standard Template Library function std::inner_product, + * this version offers no guarantee on order of execution. + * + * \param first1 The beginning of the first sequence. + * \param last1 The end of the first sequence. 
+ * \param first2 The beginning of the second sequence. + * \param init Initial value of the result. + * \return The inner product of sequences [first1, last1) + * and [first2, last2) plus \p init. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputType is a model of Assignable, + * and if \c x is an object of type \p OutputType, and \c y is an object of \p InputIterator1's \c value_type, + * and \c z is an object of \p InputIterator2's \c value_type, then x + y * z is defined + * and is convertible to \p OutputType. + * + * The following code demonstrates how to use \p inner_product to + * compute the dot product of two vectors. + * + * \code + * #include + * ... + * float vec1[3] = {1.0f, 2.0f, 5.0f}; + * float vec2[3] = {4.0f, 1.0f, 5.0f}; + * + * float result = thrust::inner_product(vec1, vec1 + 3, vec2, 0.0f); + * + * // result == 31.0f + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/inner_product + */ +template +OutputType inner_product(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, OutputType init); + + +/*! \p inner_product calculates an inner product of the ranges + * [first1, last1) and [first2, first2 + (last1 - first1)). + * + * This version of \p inner_product is identical to the first, except that is uses + * two user-supplied function objects instead of \c operator+ and \c operator*. + * + * Specifically, this version of \p inner_product computes the sum + * binary_op1( init, binary_op2(*first1, *first2) ), ... + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first sequence. + * \param last1 The end of the first sequence. + * \param first2 The beginning of the second sequence. + * \param init Initial value of the result. + * \param binary_op1 Generalized addition operation. 
+ * \param binary_op2 Generalized multiplication operation. + * \return The inner product of sequences [first1, last1) and [first2, last2). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * and \p InputIterator1's \c value_type is convertible to \p BinaryFunction2's \c first_argument_type. + * \tparam InputIterator2 is a model of Input Iterator. + * and \p InputIterator2's \c value_type is convertible to \p BinaryFunction2's \c second_argument_type. + * \tparam OutputType is a model of Assignable, + * and \p OutputType is convertible to \p BinaryFunction1's \c first_argument_type. + * \tparam BinaryFunction1 is a model of Binary Function, + * and \p BinaryFunction1's \c return_type is convertible to \p OutputType. + * \tparam BinaryFunction2 is a model of Binary Function, + * and \p BinaryFunction2's \c return_type is convertible to \p BinaryFunction1's \c second_argument_type. + * + * \code + * #include + * #include + * ... + * float vec1[3] = {1.0f, 2.0f, 5.0f}; + * float vec2[3] = {4.0f, 1.0f, 5.0f}; + * + * float init = 0.0f; + * thrust::plus binary_op1; + * thrust::multiplies binary_op2; + * + * float result = thrust::inner_product(thrust::host, vec1, vec1 + 3, vec2, init, binary_op1, binary_op2); + * + * // result == 31.0f + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/inner_product + */ +template +__host__ __device__ +OutputType inner_product(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputType init, + BinaryFunction1 binary_op1, + BinaryFunction2 binary_op2); + + +/*! \p inner_product calculates an inner product of the ranges + * [first1, last1) and [first2, first2 + (last1 - first1)). + * + * This version of \p inner_product is identical to the first, except that is uses + * two user-supplied function objects instead of \c operator+ and \c operator*. 
+ * + * Specifically, this version of \p inner_product computes the sum + * binary_op1( init, binary_op2(*first1, *first2) ), ... + * + * Unlike the C++ Standard Template Library function std::inner_product, + * this version offers no guarantee on order of execution. + * + * \param first1 The beginning of the first sequence. + * \param last1 The end of the first sequence. + * \param first2 The beginning of the second sequence. + * \param init Initial value of the result. + * \param binary_op1 Generalized addition operation. + * \param binary_op2 Generalized multiplication operation. + * \return The inner product of sequences [first1, last1) and [first2, last2). + * + * \tparam InputIterator1 is a model of Input Iterator, + * and \p InputIterator1's \c value_type is convertible to \p BinaryFunction2's \c first_argument_type. + * \tparam InputIterator2 is a model of Input Iterator. + * and \p InputIterator2's \c value_type is convertible to \p BinaryFunction2's \c second_argument_type. + * \tparam OutputType is a model of Assignable, + * and \p OutputType is convertible to \p BinaryFunction1's \c first_argument_type. + * \tparam BinaryFunction1 is a model of Binary Function, + * and \p BinaryFunction1's \c return_type is convertible to \p OutputType. + * \tparam BinaryFunction2 is a model of Binary Function, + * and \p BinaryFunction2's \c return_type is convertible to \p BinaryFunction1's \c second_argument_type. + * + * \code + * #include + * ... 
+ * float vec1[3] = {1.0f, 2.0f, 5.0f}; + * float vec2[3] = {4.0f, 1.0f, 5.0f}; + * + * float init = 0.0f; + * thrust::plus binary_op1; + * thrust::multiplies binary_op2; + * + * float result = thrust::inner_product(vec1, vec1 + 3, vec2, init, binary_op1, binary_op2); + * + * // result == 31.0f + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/inner_product + */ +template +OutputType inner_product(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, OutputType init, + BinaryFunction1 binary_op1, BinaryFunction2 binary_op2); + + +/*! \} // end transformed_reductions + * \} // end reductions + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/limits.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/limits.h new file mode 100644 index 0000000000000000000000000000000000000000..52f38b1fc9e1aa2d92241338cf89d9dc4e5d1924 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/limits.h @@ -0,0 +1,18 @@ +// Copyright (c) 2018 NVIDIA Corporation +// Author: Bryce Adelstein Lelbach +// +// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt) + +#pragma once + +#include + +#include +#include + +THRUST_NAMESPACE_BEGIN + +template +struct numeric_limits : std::numeric_limits {}; + +THRUST_NAMESPACE_END diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/memory.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..819ac251301b1bdea886e5c0e3a82ecd850fab94 --- /dev/null +++ 
b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/memory.h @@ -0,0 +1,396 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file thrust/memory.h + * \brief Abstractions for Thrust's memory model. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \defgroup memory_management Memory Management + * + * All Thrust functionalities related to memory allocation and deallocation. + * + */ + +/** \addtogroup memory_management Memory Management + * \{ + */ + +// define pointer for the purpose of Doxygenating it +// it is actually defined elsewhere +#if 0 +/*! \p pointer stores a pointer to an object allocated in memory. Like \p device_ptr, this + * type ensures type safety when dispatching standard algorithms on ranges resident in memory. + * + * \p pointer generalizes \p device_ptr by relaxing the backend system associated with the \p pointer. + * Instead of the backend system specified by \p THRUST_DEVICE_SYSTEM, \p pointer's + * system is given by its second template parameter, \p Tag. For the purpose of Thrust dispatch, + * device_ptr and pointer are considered equivalent. + * + * The raw pointer encapsulated by a \p pointer may be obtained through its get member function + * or the \p raw_pointer_cast free function. 
+ * + * \tparam Element specifies the type of the pointed-to object. + * + * \tparam Tag specifies the system with which this \p pointer is associated. This may be any Thrust + * backend system, or a user-defined tag. + * + * \tparam Reference allows the client to specify the reference type returned upon derereference. + * By default, this type is reference. + * + * \tparam Derived allows the client to specify the name of the derived type when \p pointer is used as + * a base class. This is useful to ensure that arithmetic on values of the derived type return + * values of the derived type as a result. By default, this type is pointer. + * + * \note \p pointer is not a smart pointer; it is the client's responsibility to deallocate memory + * pointer to by \p pointer. + * + * \see device_ptr + * \see reference + * \see raw_pointer_cast + */ +template + class pointer +{ + public: + /*! The type of the raw pointer + */ + typedef typename super_t::base_type raw_pointer; + + /*! \p pointer's default constructor initializes its encapsulated pointer to \c 0 + */ + __host__ __device__ + pointer(); + + /*! This constructor allows construction of a pointer from a T*. + * + * \param ptr A raw pointer to copy from, presumed to point to a location in \p Tag's memory. + * \tparam OtherElement \p OtherElement shall be convertible to \p Element. + */ + template + __host__ __device__ + explicit pointer(OtherElement *ptr); + + /*! This contructor allows initialization from another pointer-like object. + * + * \param other The \p OtherPointer to copy. + * + * \tparam OtherPointer The tag associated with \p OtherPointer shall be convertible to \p Tag, + * and its element type shall be convertible to \p Element. + */ + template + __host__ __device__ + pointer(const OtherPointer &other, + typename thrust::detail::enable_if_pointer_is_convertible< + OtherPointer, + pointer + >::type * = 0); + + /*! 
Assignment operator allows assigning from another pointer-like object whose element type + * is convertible to \c Element. + * + * \param other The other pointer-like object to assign from. + * \return *this + * + * \tparam OtherPointer The tag associated with \p OtherPointer shall be convertible to \p Tag, + * and its element type shall be convertible to \p Element. + */ + template + __host__ __device__ + typename thrust::detail::enable_if_pointer_is_convertible< + OtherPointer, + pointer, + derived_type & + >::type + operator=(const OtherPointer &other); + + /*! \p get returns this \p pointer's encapsulated raw pointer. + * \return This \p pointer's raw pointer. + */ + __host__ __device__ + Element *get() const; +}; +#endif + +/*! This version of \p malloc allocates untyped uninitialized storage associated with a given system. + * + * \param system The Thrust system with which to associate the storage. + * \param n The number of bytes of storage to allocate. + * \return If allocation succeeds, a pointer to the allocated storage; a null pointer otherwise. + * The pointer must be deallocated with \p thrust::free. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * + * \pre \p DerivedPolicy must be publically derived from thrust::execution_policy. + * + * The following code snippet demonstrates how to use \p malloc to allocate a range of memory + * associated with Thrust's device system. + * + * \code + * #include + * ... + * // allocate some memory with thrust::malloc + * const int N = 100; + * thrust::device_system_tag device_sys; + * thrust::pointer void_ptr = thrust::malloc(device_sys, N); + * + * // manipulate memory + * ... + * + * // deallocate void_ptr with thrust::free + * thrust::free(device_sys, void_ptr); + * \endcode + * + * \see free + * \see device_malloc + */ +template +__host__ __device__ +pointer malloc(const thrust::detail::execution_policy_base &system, std::size_t n); + + +/*! 
This version of \p malloc allocates typed uninitialized storage associated with a given system. + * + * \param system The Thrust system with which to associate the storage. + * \param n The number of elements of type \c T which the storage should accomodate. + * \return If allocation succeeds, a pointer to an allocation large enough to accomodate \c n + * elements of type \c T; a null pointer otherwise. + * The pointer must be deallocated with \p thrust::free. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * + * \pre \p DerivedPolicy must be publically derived from thrust::execution_policy. + * + * The following code snippet demonstrates how to use \p malloc to allocate a range of memory + * to accomodate integers associated with Thrust's device system. + * + * \code + * #include + * ... + * // allocate storage for 100 ints with thrust::malloc + * const int N = 100; + * thrust::device_system_tag device_sys; + * thrust::pointer ptr = thrust::malloc(device_sys, N); + * + * // manipulate memory + * ... + * + * // deallocate ptr with thrust::free + * thrust::free(device_sys, ptr); + * \endcode + * + * \see free + * \see device_malloc + */ +template +__host__ __device__ +pointer malloc(const thrust::detail::execution_policy_base &system, std::size_t n); + + +/*! \p get_temporary_buffer returns a pointer to storage associated with a given Thrust system sufficient to store up to + * \p n objects of type \c T. If not enough storage is available to accomodate \p n objects, an implementation may return + * a smaller buffer. The number of objects the returned buffer can accomodate is also returned. + * + * Thrust uses \p get_temporary_buffer internally when allocating temporary storage required by algorithm implementations. + * + * The storage allocated with \p get_temporary_buffer must be returned to the system with \p return_temporary_buffer. + * + * \param system The Thrust system with which to associate the storage. 
+ * \param n The requested number of objects of type \c T the storage should accomodate. + * \return A pair \c p such that p.first is a pointer to the allocated storage and p.second is the number of + * contiguous objects of type \c T that the storage can accomodate. If no storage can be allocated, p.first if + * no storage can be obtained. The storage must be returned to the system using \p return_temporary_buffer. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * + * \pre \p DerivedPolicy must be publically derived from thrust::execution_policy. + * + * The following code snippet demonstrates how to use \p get_temporary_buffer to allocate a range of memory + * to accomodate integers associated with Thrust's device system. + * + * \code + * #include + * ... + * // allocate storage for 100 ints with thrust::get_temporary_buffer + * const int N = 100; + * + * typedef thrust::pair< + * thrust::pointer, + * std::ptrdiff_t + * > ptr_and_size_t; + * + * thrust::device_system_tag device_sys; + * ptr_and_size_t ptr_and_size = thrust::get_temporary_buffer(device_sys, N); + * + * // manipulate up to 100 ints + * for(int i = 0; i < ptr_and_size.second; ++i) + * { + * *ptr_and_size.first = i; + * } + * + * // deallocate storage with thrust::return_temporary_buffer + * thrust::return_temporary_buffer(device_sys, ptr_and_size.first); + * \endcode + * + * \see malloc + * \see return_temporary_buffer + */ +template +__host__ __device__ +thrust::pair, typename thrust::pointer::difference_type> +get_temporary_buffer(const thrust::detail::execution_policy_base &system, typename thrust::pointer::difference_type n); + +/*! \p free deallocates the storage previously allocated by \p thrust::malloc. + * + * \param system The Thrust system with which the storage is associated. + * \param ptr A pointer previously returned by \p thrust::malloc. If \p ptr is null, \p free + * does nothing. + * + * \tparam DerivedPolicy The name of the derived execution policy. 
+ * + * \pre \p ptr shall have been returned by a previous call to thrust::malloc(system, n) or thrust::malloc(system, n) for some type \c T. + * + * The following code snippet demonstrates how to use \p free to deallocate a range of memory + * previously allocated with \p thrust::malloc. + * + * \code + * #include + * ... + * // allocate storage for 100 ints with thrust::malloc + * const int N = 100; + * thrust::device_system_tag device_sys; + * thrust::pointer ptr = thrust::malloc(device_sys, N); + * + * // mainpulate memory + * ... + * + * // deallocate ptr with thrust::free + * thrust::free(device_sys, ptr); + * \endcode + */ +template +__host__ __device__ +void free(const thrust::detail::execution_policy_base &system, Pointer ptr); + + +/*! \p return_temporary_buffer deallocates storage associated with a given Thrust system previously allocated by \p get_temporary_buffer. + * + * Thrust uses \p return_temporary_buffer internally when deallocating temporary storage required by algorithm implementations. + * + * \param system The Thrust system with which the storage is associated. + * \param p A pointer previously returned by \p thrust::get_temporary_buffer. If \p ptr is null, \p return_temporary_buffer does nothing. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * + * \pre \p p shall have been previously allocated by \p thrust::get_temporary_buffer. + * + * The following code snippet demonstrates how to use \p return_temporary_buffer to deallocate a range of memory + * previously allocated by \p get_temporary_buffer. + * + * \code + * #include + * ... 
+ * // allocate storage for 100 ints with thrust::get_temporary_buffer + * const int N = 100; + * + * typedef thrust::pair< + * thrust::pointer, + * std::ptrdiff_t + * > ptr_and_size_t; + * + * thrust::device_system_tag device_sys; + * ptr_and_size_t ptr_and_size = thrust::get_temporary_buffer(device_sys, N); + * + * // manipulate up to 100 ints + * for(int i = 0; i < ptr_and_size.second; ++i) + * { + * *ptr_and_size.first = i; + * } + * + * // deallocate storage with thrust::return_temporary_buffer + * thrust::return_temporary_buffer(device_sys, ptr_and_size.first); + * \endcode + * + * \see free + * \see get_temporary_buffer + */ +template +__host__ __device__ +void return_temporary_buffer(const thrust::detail::execution_policy_base &system, Pointer p, std::ptrdiff_t n); + + +/*! \p raw_pointer_cast creates a "raw" pointer from a pointer-like type, + * simply returning the wrapped pointer, should it exist. + * + * \param ptr The pointer of interest. + * \return ptr.get(), if the expression is well formed; ptr, otherwise. + * \see raw_reference_cast + */ +template +__host__ __device__ +typename thrust::detail::pointer_traits::raw_pointer + raw_pointer_cast(Pointer ptr); + + +/*! \p raw_reference_cast creates a "raw" reference from a wrapped reference type, + * simply returning the underlying reference, should it exist. + * + * If the argument is not a reference wrapper, the result is a reference to the argument. + * + * \param ref The reference of interest. + * \return *thrust::raw_pointer_cast(&ref). + * \note There are two versions of \p raw_reference_cast. One for const references, + * and one for non-const. + * \see raw_pointer_cast + */ +template +__host__ __device__ +typename detail::raw_reference::type + raw_reference_cast(T &ref); + + +/*! \p raw_reference_cast creates a "raw" reference from a wrapped reference type, + * simply returning the underlying reference, should it exist. 
+ * + * If the argument is not a reference wrapper, the result is a reference to the argument. + * + * \param ref The reference of interest. + * \return *thrust::raw_pointer_cast(&ref). + * \note There are two versions of \p raw_reference_cast. One for const references, + * and one for non-const. + * \see raw_pointer_cast + */ +template +__host__ __device__ +typename detail::raw_reference::type + raw_reference_cast(const T &ref); + +/*! \} // memory_management + */ + +THRUST_NAMESPACE_END diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/merge.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/merge.h new file mode 100644 index 0000000000000000000000000000000000000000..724f4c167f16b7306328e85236e971966e52b247 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/merge.h @@ -0,0 +1,677 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file merge.h + * \brief Merging sorted ranges + */ + +#pragma once + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup merging Merging + * \ingroup algorithms + * \{ + */ + + +/*! \p merge combines two sorted ranges [first1, last1) and [first2, last2) + * into a single sorted range. 
That is, it copies from [first1, last1) and + * [first2, last2) into [result, result + (last1 - first1) + (last2 - first2)) + * such that the resulting range is in ascending order. \p merge is stable, meaning both that the + * relative order of elements within each input range is preserved, and that for equivalent elements + * in both input ranges the element from the first range precedes the element from the second. The + * return value is result + (last1 - first1) + (last2 - first2). + * + * This version of \p merge compares elements using \c operator<. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the merged output. + * \return The end of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use + * \p merge to compute the merger of two sorted sets of integers using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * int A1[6] = {1, 3, 5, 7, 9, 11}; + * int A2[7] = {1, 1, 2, 3, 5, 8, 13}; + * + * int result[13]; + * + * int *result_end = + * thrust::merge(thrust::host, + * A1, A1 + 6, + * A2, A2 + 7, + * result); + * // result = {1, 1, 1, 2, 3, 3, 5, 5, 7, 8, 9, 11, 13} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/merge + * \see \p set_union + * \see \p sort + * \see \p is_sorted + */ +template +__host__ __device__ + OutputIterator merge(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result); + + +/*! \p merge combines two sorted ranges [first1, last1) and [first2, last2) + * into a single sorted range. That is, it copies from [first1, last1) and + * [first2, last2) into [result, result + (last1 - first1) + (last2 - first2)) + * such that the resulting range is in ascending order. \p merge is stable, meaning both that the + * relative order of elements within each input range is preserved, and that for equivalent elements + * in both input ranges the element from the first range precedes the element from the second. The + * return value is result + (last1 - first1) + (last2 - first2). + * + * This version of \p merge compares elements using \c operator<. + * + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. 
+ * \param last2 The end of the second input range. + * \param result The beginning of the merged output. + * \return The end of the output range. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use + * \p merge to compute the merger of two sorted sets of integers. + * + * \code + * #include + * ... + * int A1[6] = {1, 3, 5, 7, 9, 11}; + * int A2[7] = {1, 1, 2, 3, 5, 8, 13}; + * + * int result[13]; + * + * int *result_end = thrust::merge(A1, A1 + 6, A2, A2 + 7, result); + * // result = {1, 1, 1, 2, 3, 3, 5, 5, 7, 8, 9, 11, 13} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/merge + * \see \p set_union + * \see \p sort + * \see \p is_sorted + */ +template + OutputIterator merge(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result); + + +/*! 
\p merge combines two sorted ranges [first1, last1) and [first2, last2) + * into a single sorted range. That is, it copies from [first1, last1) and + * [first2, last2) into [result, result + (last1 - first1) + (last2 - first2)) + * such that the resulting range is in ascending order. \p merge is stable, meaning both that the + * relative order of elements within each input range is preserved, and that for equivalent elements + * in both input ranges the element from the first range precedes the element from the second. The + * return value is result + (last1 - first1) + (last2 - first2). + * + * This version of \p merge compares elements using a function object \p comp. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the merged output. + * \param comp Comparison operator. + * \return The end of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. 
+ * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use + * \p merge to compute the merger of two sets of integers sorted in + * descending order using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * int A1[6] = {11, 9, 7, 5, 3, 1}; + * int A2[7] = {13, 8, 5, 3, 2, 1, 1}; + * + * int result[13]; + * + * int *result_end = thrust::merge(thrust::host, + * A1, A1 + 6, + * A2, A2 + 7, + * result, + * thrust::greater()); + * // result = {13, 11, 9, 8, 7, 5, 5, 3, 3, 2, 1, 1, 1} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/merge + * \see \p sort + * \see \p is_sorted + */ +template +__host__ __device__ + OutputIterator merge(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result, + StrictWeakCompare comp); + + +/*! \p merge combines two sorted ranges [first1, last1) and [first2, last2) + * into a single sorted range. That is, it copies from [first1, last1) and + * [first2, last2) into [result, result + (last1 - first1) + (last2 - first2)) + * such that the resulting range is in ascending order. \p merge is stable, meaning both that the + * relative order of elements within each input range is preserved, and that for equivalent elements + * in both input ranges the element from the first range precedes the element from the second. The + * return value is result + (last1 - first1) + (last2 - first2). + * + * This version of \p merge compares elements using a function object \p comp. + * + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. 
+ * \param last2 The end of the second input range. + * \param result The beginning of the merged output. + * \param comp Comparison operator. + * \return The end of the output range. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use + * \p merge to compute the merger of two sets of integers sorted in + * descending order. + * + * \code + * #include + * #include + * ... + * int A1[6] = {11, 9, 7, 5, 3, 1}; + * int A2[7] = {13, 8, 5, 3, 2, 1, 1}; + * + * int result[13]; + * + * int *result_end = thrust::merge(A1, A1 + 6, A2, A2 + 7, result, thrust::greater()); + * // result = {13, 11, 9, 8, 7, 5, 5, 3, 3, 2, 1, 1, 1} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/merge + * \see \p sort + * \see \p is_sorted + */ +template + OutputIterator merge(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result, + StrictWeakCompare comp); + + +/*! \p merge_by_key performs a key-value merge. 
That is, \p merge_by_key copies elements from + * [keys_first1, keys_last1) and [keys_first2, keys_last2) into a single range, + * [keys_result, keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that + * the resulting range is in ascending key order. + * + * At the same time, \p merge_by_key copies elements from the two associated ranges [values_first1 + (keys_last1 - keys_first1)) + * and [values_first2 + (keys_last2 - keys_first2)) into a single range, + * [values_result, values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that + * the resulting range is in ascending order implied by each input element's associated key. + * + * \p merge_by_key is stable, meaning both that the relative order of elements within each input range is + * preserved, and that for equivalent elements in all input key ranges the element from the first range + * precedes the element from the second. + * + * The return value is is (keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) + * and (values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)). + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the merged output range of keys. + * \param values_result The beginning of the merged output range of values. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. 
+ * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use + * \p merge_by_key to compute the merger of two sets of integers sorted in + * ascending order using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * int A_keys[6] = {1, 3, 5, 7, 9, 11}; + * int A_vals[6] = {0, 0, 0, 0, 0, 0}; + * + * int B_keys[7] = {1, 1, 2, 3, 5, 8, 13}; + * int B_vals[7] = {1, 1, 1, 1, 1, 1, 1}; + * + * int keys_result[13]; + * int vals_result[13]; + * + * thrust::pair end = + * thrust::merge_by_key(thrust::host, + * A_keys, A_keys + 6, + * B_keys, B_keys + 7, + * A_vals, B_vals, + * keys_result, vals_result); + * + * // keys_result = {1, 1, 1, 2, 3, 3, 5, 5, 7, 8, 9, 11, 13} + * // vals_result = {0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1} + * \endcode + * + * \see merge + * \see \p sort_by_key + * \see \p is_sorted + */ +template +__host__ __device__ + thrust::pair + merge_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 keys_first1, InputIterator1 keys_last1, + InputIterator2 keys_first2, InputIterator2 keys_last2, + InputIterator3 values_first1, InputIterator4 values_first2, + OutputIterator1 keys_result, + OutputIterator2 values_result); + + +/*! \p merge_by_key performs a key-value merge. That is, \p merge_by_key copies elements from + * [keys_first1, keys_last1) and [keys_first2, keys_last2) into a single range, + * [keys_result, keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that + * the resulting range is in ascending key order. + * + * At the same time, \p merge_by_key copies elements from the two associated ranges [values_first1 + (keys_last1 - keys_first1)) + * and [values_first2 + (keys_last2 - keys_first2)) into a single range, + * [values_result, values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that + * the resulting range is in ascending order implied by each input element's associated key. + * + * \p merge_by_key is stable, meaning both that the relative order of elements within each input range is + * preserved, and that for equivalent elements in all input key ranges the element from the first range + * precedes the element from the second. 
+ * + * The return value is is (keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) + * and (values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)). + * + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the merged output range of keys. + * \param values_result The beginning of the merged output range of values. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. 
+ * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use + * \p merge_by_key to compute the merger of two sets of integers sorted in + * ascending order. + * + * \code + * #include + * #include + * ... + * int A_keys[6] = {1, 3, 5, 7, 9, 11}; + * int A_vals[6] = {0, 0, 0, 0, 0, 0}; + * + * int B_keys[7] = {1, 1, 2, 3, 5, 8, 13}; + * int B_vals[7] = {1, 1, 1, 1, 1, 1, 1}; + * + * int keys_result[13]; + * int vals_result[13]; + * + * thrust::pair end = thrust::merge_by_key(A_keys, A_keys + 6, B_keys, B_keys + 7, A_vals, B_vals, keys_result, vals_result); + * + * // keys_result = {1, 1, 1, 2, 3, 3, 5, 5, 7, 8, 9, 11, 13} + * // vals_result = {0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1} + * \endcode + * + * \see merge + * \see \p sort_by_key + * \see \p is_sorted + */ +template + thrust::pair + merge_by_key(InputIterator1 keys_first1, InputIterator1 keys_last1, + InputIterator2 keys_first2, InputIterator2 keys_last2, + InputIterator3 values_first1, InputIterator4 values_first2, + OutputIterator1 keys_result, + OutputIterator2 values_result); + + +/*! \p merge_by_key performs a key-value merge. That is, \p merge_by_key copies elements from + * [keys_first1, keys_last1) and [keys_first2, keys_last2) into a single range, + * [keys_result, keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that + * the resulting range is in ascending key order. 
+ * + * At the same time, \p merge_by_key copies elements from the two associated ranges [values_first1 + (keys_last1 - keys_first1)) + * and [values_first2 + (keys_last2 - keys_first2)) into a single range, + * [values_result, values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that + * the resulting range is in ascending order implied by each input element's associated key. + * + * \p merge_by_key is stable, meaning both that the relative order of elements within each input range is + * preserved, and that for equivalent elements in all input key ranges the element from the first range + * precedes the element from the second. + * + * The return value is is (keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) + * and (values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)). + * + * This version of \p merge_by_key compares key elements using a function object \p comp. + * + * The algorithm's execution is parallelized using \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the merged output range of keys. + * \param values_result The beginning of the merged output range of values. + * \param comp Comparison operator. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. + * + * \tparam DerivedPolicy The name of the derived execution policy. 
+ * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator1's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator1's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use + * \p merge_by_key to compute the merger of two sets of integers sorted in + * descending order using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * int A_keys[6] = {11, 9, 7, 5, 3, 1}; + * int A_vals[6] = { 0, 0, 0, 0, 0, 0}; + * + * int B_keys[7] = {13, 8, 5, 3, 2, 1, 1}; + * int B_vals[7] = { 1, 1, 1, 1, 1, 1, 1}; + * + * int keys_result[13]; + * int vals_result[13]; + * + * thrust::pair end = + * thrust::merge_by_key(thrust::host, + * A_keys, A_keys + 6, + * B_keys, B_keys + 7, + * A_vals, B_vals, + * keys_result, vals_result, + * thrust::greater()); + * + * // keys_result = {13, 11, 9, 8, 7, 5, 5, 3, 3, 2, 1, 1, 1} + * // vals_result = { 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1} + * \endcode + * + * \see merge + * \see \p sort_by_key + * \see \p is_sorted + */ +template +__host__ __device__ + thrust::pair + merge_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 keys_first1, InputIterator1 keys_last1, + InputIterator2 keys_first2, InputIterator2 keys_last2, + InputIterator3 values_first1, InputIterator4 values_first2, + OutputIterator1 keys_result, + OutputIterator2 values_result, + Compare comp); + + +/*! \p merge_by_key performs a key-value merge. That is, \p merge_by_key copies elements from + * [keys_first1, keys_last1) and [keys_first2, keys_last2) into a single range, + * [keys_result, keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that + * the resulting range is in ascending key order. + * + * At the same time, \p merge_by_key copies elements from the two associated ranges [values_first1 + (keys_last1 - keys_first1)) + * and [values_first2 + (keys_last2 - keys_first2)) into a single range, + * [values_result, values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) such that + * the resulting range is in ascending order implied by each input element's associated key. + * + * \p merge_by_key is stable, meaning both that the relative order of elements within each input range is + * preserved, and that for equivalent elements in all input key ranges the element from the first range + * precedes the element from the second. 
+ * + * The return value is is (keys_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)) + * and (values_result + (keys_last1 - keys_first1) + (keys_last2 - keys_first2)). + * + * This version of \p merge_by_key compares key elements using a function object \p comp. + * + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the merged output range of keys. + * \param values_result The beginning of the merged output range of values. + * \param comp Comparison operator. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator1's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator1's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. 
+ * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use + * \p merge_by_key to compute the merger of two sets of integers sorted in + * descending order. + * + * \code + * #include + * #include + * ... + * int A_keys[6] = {11, 9, 7, 5, 3, 1}; + * int A_vals[6] = { 0, 0, 0, 0, 0, 0}; + * + * int B_keys[7] = {13, 8, 5, 3, 2, 1, 1}; + * int B_vals[7] = { 1, 1, 1, 1, 1, 1, 1}; + * + * int keys_result[13]; + * int vals_result[13]; + * + * thrust::pair end = thrust::merge_by_key(A_keys, A_keys + 6, B_keys, B_keys + 7, A_vals, B_vals, keys_result, vals_result, thrust::greater()); + * + * // keys_result = {13, 11, 9, 8, 7, 5, 5, 3, 3, 2, 1, 1, 1} + * // vals_result = { 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1} + * \endcode + * + * \see merge + * \see \p sort_by_key + * \see \p is_sorted + */ +template + thrust::pair + merge_by_key(InputIterator1 keys_first1, InputIterator1 keys_last1, + InputIterator2 keys_first2, InputIterator2 keys_last2, + InputIterator3 values_first1, InputIterator4 values_first2, + OutputIterator1 keys_result, + OutputIterator2 values_result, + StrictWeakCompare comp); + + +/*! 
\} // merging + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/mismatch.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/mismatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bbdf2923a09e87b143920454d4473204bd3a0388 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/mismatch.h @@ -0,0 +1,257 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file mismatch.h + * \brief Search for differences between ranges + */ + +#pragma once + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup algorithms + */ + +/*! \addtogroup searching + * \ingroup algorithms + * \{ + */ + + +/*! \p mismatch finds the first position where the two ranges [first1, last1) + * and [first2, first2 + (last1 - first1)) differ. The two versions of + * \p mismatch use different tests for whether elements differ. + * + * This version of \p mismatch finds the first iterator \c i in [first1, last1) + * such that *i == *(first2 + (i - first1)) is \c false. The return value is a + * \c pair whose first element is \c i and whose second element is *(first2 + (i - first1)). 
+ * If no such iterator \c i exists, the return value is a \c pair whose first element + * is \c last1 and whose second element is *(first2 + (last1 - first1)). + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first sequence. + * \param last1 The end of the first sequence. + * \param first2 The beginning of the second sequence. + * \return The first position where the sequences differ. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator + * and \p InputIterator1's \c value_type is equality comparable to \p InputIterator2's \c value_type. + * \tparam InputIterator2 is a model of Input Iterator. + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector vec1(4); + * thrust::device_vector vec2(4); + * + * vec1[0] = 0; vec2[0] = 0; + * vec1[1] = 5; vec2[1] = 5; + * vec1[2] = 3; vec2[2] = 8; + * vec1[3] = 7; vec2[3] = 7; + * + * typedef thrust::device_vector::iterator Iterator; + * thrust::pair result; + * + * result = thrust::mismatch(thrust::device, vec1.begin(), vec1.end(), vec2.begin()); + * + * // result.first is vec1.begin() + 2 + * // result.second is vec2.begin() + 2 + * \endcode + * + * \see find + * \see find_if + */ +template +__host__ __device__ +thrust::pair mismatch(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2); + + +/*! \p mismatch finds the first position where the two ranges [first1, last1) + * and [first2, first2 + (last1 - first1)) differ. The two versions of + * \p mismatch use different tests for whether elements differ. + * + * This version of \p mismatch finds the first iterator \c i in [first1, last1) + * such that *i == *(first2 + (i - first1)) is \c false. 
The return value is a + * \c pair whose first element is \c i and whose second element is *(first2 + (i - first1)). + * If no such iterator \c i exists, the return value is a \c pair whose first element + * is \c last1 and whose second element is *(first2 + (last1 - first1)). + * + * \param first1 The beginning of the first sequence. + * \param last1 The end of the first sequence. + * \param first2 The beginning of the second sequence. + * \return The first position where the sequences differ. + * + * \tparam InputIterator1 is a model of Input Iterator + * and \p InputIterator1's \c value_type is equality comparable to \p InputIterator2's \c value_type. + * \tparam InputIterator2 is a model of Input Iterator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector vec1(4); + * thrust::device_vector vec2(4); + * + * vec1[0] = 0; vec2[0] = 0; + * vec1[1] = 5; vec2[1] = 5; + * vec1[2] = 3; vec2[2] = 8; + * vec1[3] = 7; vec2[3] = 7; + * + * typedef thrust::device_vector::iterator Iterator; + * thrust::pair result; + * + * result = thrust::mismatch(vec1.begin(), vec1.end(), vec2.begin()); + * + * // result.first is vec1.begin() + 2 + * // result.second is vec2.begin() + 2 + * \endcode + * + * \see find + * \see find_if + */ +template +thrust::pair mismatch(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2); + + +/*! \p mismatch finds the first position where the two ranges [first1, last1) + * and [first2, first2 + (last1 - first1)) differ. The two versions of + * \p mismatch use different tests for whether elements differ. + * + * This version of \p mismatch finds the first iterator \c i in [first1, last1) + * such that pred(\*i, \*(first2 + (i - first1)) is \c false. The return value is a + * \c pair whose first element is \c i and whose second element is *(first2 + (i - first1)). 
+ * If no such iterator \c i exists, the return value is a \c pair whose first element is + * \c last1 and whose second element is *(first2 + (last1 - first1)). + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first sequence. + * \param last1 The end of the first sequence. + * \param first2 The beginning of the second sequence. + * \param pred The binary predicate to compare elements. + * \return The first position where the sequences differ. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator. + * \tparam InputIterator2 is a model of Input Iterator. + * \tparam Predicate is a model of Input Iterator. + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector vec1(4); + * thrust::device_vector vec2(4); + * + * vec1[0] = 0; vec2[0] = 0; + * vec1[1] = 5; vec2[1] = 5; + * vec1[2] = 3; vec2[2] = 8; + * vec1[3] = 7; vec2[3] = 7; + * + * typedef thrust::device_vector::iterator Iterator; + * thrust::pair result; + * + * result = thrust::mismatch(thrust::device, vec1.begin(), vec1.end(), vec2.begin(), thrust::equal_to()); + * + * // result.first is vec1.begin() + 2 + * // result.second is vec2.begin() + 2 + * \endcode + * + * \see find + * \see find_if + */ +template +__host__ __device__ +thrust::pair mismatch(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + BinaryPredicate pred); + + +/*! \p mismatch finds the first position where the two ranges [first1, last1) + * and [first2, first2 + (last1 - first1)) differ. The two versions of + * \p mismatch use different tests for whether elements differ. + * + * This version of \p mismatch finds the first iterator \c i in [first1, last1) + * such that pred(\*i, \*(first2 + (i - first1)) is \c false. 
The return value is a + * \c pair whose first element is \c i and whose second element is *(first2 + (i - first1)). + * If no such iterator \c i exists, the return value is a \c pair whose first element is + * \c last1 and whose second element is *(first2 + (last1 - first1)). + * + * \param first1 The beginning of the first sequence. + * \param last1 The end of the first sequence. + * \param first2 The beginning of the second sequence. + * \param pred The binary predicate to compare elements. + * \return The first position where the sequences differ. + * + * \tparam InputIterator1 is a model of Input Iterator. + * \tparam InputIterator2 is a model of Input Iterator. + * \tparam Predicate is a model of Input Iterator. + * + * \code + * #include + * #include + * ... + * thrust::device_vector vec1(4); + * thrust::device_vector vec2(4); + * + * vec1[0] = 0; vec2[0] = 0; + * vec1[1] = 5; vec2[1] = 5; + * vec1[2] = 3; vec2[2] = 8; + * vec1[3] = 7; vec2[3] = 7; + * + * typedef thrust::device_vector::iterator Iterator; + * thrust::pair result; + * + * result = thrust::mismatch(vec1.begin(), vec1.end(), vec2.begin(), thrust::equal_to()); + * + * // result.first is vec1.begin() + 2 + * // result.second is vec2.begin() + 2 + * \endcode + * + * \see find + * \see find_if + */ +template +thrust::pair mismatch(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + BinaryPredicate pred); + +/*! 
\} // end searching + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/optional.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/optional.h new file mode 100644 index 0000000000000000000000000000000000000000..a1ca4f46554b87666863bb64be6e2d40fbd3a38a --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/optional.h @@ -0,0 +1,2876 @@ +/// +// optional - An implementation of std::optional with extensions +// Written in 2017 by Sy Brand (@TartanLlama) +// +// To the extent possible under law, the author(s) have dedicated all +// copyright and related and neighboring rights to this software to the +// public domain worldwide. This software is distributed without any warranty. +// +// You should have received a copy of the CC0 Public Domain Dedication +// along with this software. If not, see +// . 
+/// + +#pragma once + +#include +#include +#include + +#if THRUST_CPP_DIALECT >= 2011 + +#include +#include + +#define THRUST_OPTIONAL_VERSION_MAJOR 0 +#define THRUST_OPTIONAL_VERSION_MINOR 2 + +#include +#include +#include +#include +#include + +#if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC && _MSC_VER == 1900) +#define THRUST_OPTIONAL_MSVC2015 +#endif + +#if (defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ <= 9 && \ + !defined(__clang__)) +#define THRUST_OPTIONAL_GCC49 +#endif + +#if (defined(__GNUC__) && __GNUC__ == 5 && __GNUC_MINOR__ <= 4 && \ + !defined(__clang__)) +#define THRUST_OPTIONAL_GCC54 +#endif + +#if (defined(__GNUC__) && __GNUC__ == 5 && __GNUC_MINOR__ <= 5 && \ + !defined(__clang__)) +#define THRUST_OPTIONAL_GCC55 +#endif + +#if (defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ <= 9 && \ + !defined(__clang__)) +// GCC < 5 doesn't support overloading on const&& for member functions +#define THRUST_OPTIONAL_NO_CONSTRR + +// GCC < 5 doesn't support some standard C++11 type traits +#define THRUST_OPTIONAL_IS_TRIVIALLY_COPY_CONSTRUCTIBLE(T) \ + std::has_trivial_copy_constructor::value +#define THRUST_OPTIONAL_IS_TRIVIALLY_COPY_ASSIGNABLE(T) std::has_trivial_copy_assign::value + +// GCC < 5 doesn't provide a way to emulate std::is_trivially_move_*, +// so don't enable any optimizations that rely on them: +#define THRUST_OPTIONAL_IS_TRIVIALLY_MOVE_CONSTRUCTIBLE(T) false +#define THRUST_OPTIONAL_IS_TRIVIALLY_MOVE_ASSIGNABLE(T) false + +// This one will be different for GCC 5.7 if it's ever supported +#define THRUST_OPTIONAL_IS_TRIVIALLY_DESTRUCTIBLE(T) std::is_trivially_destructible::value + +// GCC 5 < v < 8 has a bug in is_trivially_copy_constructible which breaks std::vector +// for non-copyable types +#elif (defined(__GNUC__) && __GNUC__ < 8 && \ + !defined(__clang__)) +#ifndef THRUST_GCC_LESS_8_TRIVIALLY_COPY_CONSTRUCTIBLE_MUTEX +#define THRUST_GCC_LESS_8_TRIVIALLY_COPY_CONSTRUCTIBLE_MUTEX +THRUST_NAMESPACE_BEGIN + namespace detail 
{ + template + struct is_trivially_copy_constructible : std::is_trivially_copy_constructible{}; +#ifdef _GLIBCXX_VECTOR + template + struct is_trivially_copy_constructible> + : std::is_trivially_copy_constructible{}; +#endif + } +THRUST_NAMESPACE_END +#endif + +#define THRUST_OPTIONAL_IS_TRIVIALLY_COPY_CONSTRUCTIBLE(T) \ + thrust::detail::is_trivially_copy_constructible::value +#define THRUST_OPTIONAL_IS_TRIVIALLY_COPY_ASSIGNABLE(T) \ + std::is_trivially_copy_assignable::value +#define THRUST_OPTIONAL_IS_TRIVIALLY_MOVE_CONSTRUCTIBLE(T) \ + std::is_trivially_move_constructible::value +#define THRUST_OPTIONAL_IS_TRIVIALLY_MOVE_ASSIGNABLE(T) \ + std::is_trivially_move_assignable::value +#define THRUST_OPTIONAL_IS_TRIVIALLY_DESTRUCTIBLE(T) std::is_trivially_destructible::value +#else + +// To support clang + old libstdc++ without type traits, check for equivalent +// clang built-ins and use them if present. See note above +// is_trivially_copyable_impl in +// thrust/type_traits/is_trivially_relocatable.h for more details. 
+ +#ifndef __has_feature +#define __has_feature(x) 0 +#endif + +#if defined(__GLIBCXX__) && __has_feature(is_trivially_constructible) +#define THRUST_OPTIONAL_IS_TRIVIALLY_COPY_CONSTRUCTIBLE(T) \ + __is_trivially_constructible(T, T const&) +#else +#define THRUST_OPTIONAL_IS_TRIVIALLY_COPY_CONSTRUCTIBLE(T) \ + std::is_trivially_copy_constructible::value +#endif + +#if defined(__GLIBCXX__) && __has_feature(is_trivially_assignable) +#define THRUST_OPTIONAL_IS_TRIVIALLY_COPY_ASSIGNABLE(T) \ + __is_trivially_assignable(T&, T const&) +#else +#define THRUST_OPTIONAL_IS_TRIVIALLY_COPY_ASSIGNABLE(T) \ + std::is_trivially_copy_assignable::value +#endif + +#if defined(__GLIBCXX__) && __has_feature(is_trivially_constructible) +#define THRUST_OPTIONAL_IS_TRIVIALLY_MOVE_CONSTRUCTIBLE(T) \ + __is_trivially_constructible(T, T&&) +#else +#define THRUST_OPTIONAL_IS_TRIVIALLY_MOVE_CONSTRUCTIBLE(T) \ + std::is_trivially_move_constructible::value +#endif + +#if defined(__GLIBCXX__) && __has_feature(is_trivially_assignable) +#define THRUST_OPTIONAL_IS_TRIVIALLY_MOVE_ASSIGNABLE(T) \ + __is_trivially_assignable(T&, T&&) +#else +#define THRUST_OPTIONAL_IS_TRIVIALLY_MOVE_ASSIGNABLE(T) \ + std::is_trivially_move_assignable::value +#endif + +#if defined(__GLIBCXX__) && __has_feature(is_trivially_destructible) +#define THRUST_OPTIONAL_IS_TRIVIALLY_DESTRUCTIBLE(T) \ + __is_trivially_destructible(T) +#else +#define THRUST_OPTIONAL_IS_TRIVIALLY_DESTRUCTIBLE(T) \ + std::is_trivially_destructible::value +#endif + +#endif + +#if THRUST_CPP_DIALECT > 2011 +#define THRUST_OPTIONAL_CPP14 +#endif + +// constexpr implies const in C++11, not C++14 +#if (THRUST_CPP_DIALECT == 2011 || defined(THRUST_OPTIONAL_MSVC2015) || \ + defined(THRUST_OPTIONAL_GCC49)) +/// \exclude +#define THRUST_OPTIONAL_CPP11_CONSTEXPR +#else +/// \exclude +#define THRUST_OPTIONAL_CPP11_CONSTEXPR constexpr +#endif + +THRUST_NAMESPACE_BEGIN + +#ifndef THRUST_MONOSTATE_INPLACE_MUTEX +#define THRUST_MONOSTATE_INPLACE_MUTEX +/// \brief 
Used to represent an optional with no data; essentially a bool +class monostate {}; + +/// \brief A tag type to tell optional to construct its value in-place +struct in_place_t { + explicit in_place_t() = default; +}; +/// \brief A tag to tell optional to construct its value in-place +static constexpr in_place_t in_place{}; +#endif + +template class optional; + +/// \exclude +namespace detail { +#ifndef THRUST_TRAITS_MUTEX +#define THRUST_TRAITS_MUTEX +// C++14-style aliases for brevity +template using remove_const_t = typename std::remove_const::type; +template +using remove_reference_t = typename std::remove_reference::type; +template using decay_t = typename std::decay::type; +template +using enable_if_t = typename std::enable_if::type; +template +using conditional_t = typename std::conditional::type; + +// std::conjunction from C++17 +template struct conjunction : std::true_type {}; +template struct conjunction : B {}; +template +struct conjunction + : std::conditional, B>::type {}; + +#if defined(_LIBCPP_VERSION) && THRUST_CPP_DIALECT == 2011 +#define THRUST_OPTIONAL_LIBCXX_MEM_FN_WORKAROUND +#endif + +// In C++11 mode, there's an issue in libc++'s std::mem_fn +// which results in a hard-error when using it in a noexcept expression +// in some cases. This is a check to workaround the common failing case. 
+#ifdef THRUST_OPTIONAL_LIBCXX_MEM_FN_WORKAROUND +template struct is_pointer_to_non_const_member_func : std::false_type{}; +template +struct is_pointer_to_non_const_member_func : std::true_type{}; +template +struct is_pointer_to_non_const_member_func : std::true_type{}; +template +struct is_pointer_to_non_const_member_func : std::true_type{}; +template +struct is_pointer_to_non_const_member_func : std::true_type{}; +template +struct is_pointer_to_non_const_member_func : std::true_type{}; +template +struct is_pointer_to_non_const_member_func : std::true_type{}; + +template struct is_const_or_const_ref : std::false_type{}; +template struct is_const_or_const_ref : std::true_type{}; +template struct is_const_or_const_ref : std::true_type{}; +#endif + +// std::invoke from C++17 +// https://stackoverflow.com/questions/38288042/c11-14-invoke-workaround +__thrust_exec_check_disable__ +template ::value + && is_const_or_const_ref::value)>, +#endif + typename = enable_if_t>::value>, + int = 0> +__host__ __device__ +constexpr auto invoke(Fn &&f, Args &&... args) + noexcept(noexcept(std::mem_fn(f)(std::forward(args)...))) + THRUST_TRAILING_RETURN(decltype(std::mem_fn(f)(std::forward(args)...))) +{ + return std::mem_fn(f)(std::forward(args)...); +} + +__thrust_exec_check_disable__ +template >::value>> +__host__ __device__ +constexpr auto invoke(Fn &&f, Args &&... 
args) + noexcept(noexcept(std::forward(f)(std::forward(args)...))) + THRUST_TRAILING_RETURN(decltype(std::forward(f)(std::forward(args)...))) +{ + return std::forward(f)(std::forward(args)...); +} +#endif + +// std::void_t from C++17 +template struct voider { using type = void; }; +template using void_t = typename voider::type; + +// Trait for checking if a type is a thrust::optional +template struct is_optional_impl : std::false_type {}; +template struct is_optional_impl> : std::true_type {}; +template using is_optional = is_optional_impl>; + +// Change void to thrust::monostate +template +using fixup_void = conditional_t::value, monostate, U>; + +template > +using get_map_return = optional>>; + +// Check if invoking F for some Us returns void +template struct returns_void_impl; +template +struct returns_void_impl>, U...> + : std::is_void> {}; +template +using returns_void = returns_void_impl; + +template +using enable_if_ret_void = enable_if_t::value>; + +template +using disable_if_ret_void = enable_if_t::value>; + +template +using enable_forward_value = + detail::enable_if_t::value && + !std::is_same, in_place_t>::value && + !std::is_same, detail::decay_t>::value>; + +template +using enable_from_other = detail::enable_if_t< + std::is_constructible::value && + !std::is_constructible &>::value && + !std::is_constructible &&>::value && + !std::is_constructible &>::value && + !std::is_constructible &&>::value && + !std::is_convertible &, T>::value && + !std::is_convertible &&, T>::value && + !std::is_convertible &, T>::value && + !std::is_convertible &&, T>::value>; + +template +using enable_assign_forward = detail::enable_if_t< + !std::is_same, detail::decay_t>::value && + !detail::conjunction, + std::is_same>>::value && + std::is_constructible::value && std::is_assignable::value>; + +template +using enable_assign_from_other = detail::enable_if_t< + std::is_constructible::value && + std::is_assignable::value && + !std::is_constructible &>::value && + 
!std::is_constructible &&>::value && + !std::is_constructible &>::value && + !std::is_constructible &&>::value && + !std::is_convertible &, T>::value && + !std::is_convertible &&, T>::value && + !std::is_convertible &, T>::value && + !std::is_convertible &&, T>::value && + !std::is_assignable &>::value && + !std::is_assignable &&>::value && + !std::is_assignable &>::value && + !std::is_assignable &&>::value>; + +#if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC +// TODO make a version which works with MSVC +template struct is_swappable : std::true_type {}; + +template struct is_nothrow_swappable : std::true_type {}; +#else +// https://stackoverflow.com/questions/26744589/what-is-a-proper-way-to-implement-is-swappable-to-test-for-the-swappable-concept +namespace swap_adl_tests { +// if swap ADL finds this then it would call std::swap otherwise (same +// signature) +struct tag {}; + +template tag swap(T &, T &); +template tag swap(T (&a)[N], T (&b)[N]); + +// helper functions to test if an unqualified swap is possible, and if it +// becomes std::swap +template std::false_type can_swap(...) 
noexcept(false); +template (), std::declval()))> +std::true_type can_swap(int) noexcept(noexcept(swap(std::declval(), + std::declval()))); + +template std::false_type uses_std(...); +template +std::is_same(), std::declval())), tag> +uses_std(int); + +template +struct is_std_swap_noexcept + : std::integral_constant::value && + std::is_nothrow_move_assignable::value> {}; + +template +struct is_std_swap_noexcept : is_std_swap_noexcept {}; + +template +struct is_adl_swap_noexcept + : std::integral_constant(0))> {}; +} // namespace swap_adl_tests + +template +struct is_swappable + : std::integral_constant< + bool, + decltype(detail::swap_adl_tests::can_swap(0))::value && + (!decltype(detail::swap_adl_tests::uses_std(0))::value || + (std::is_move_assignable::value && + std::is_move_constructible::value))> {}; + +template +struct is_swappable + : std::integral_constant< + bool, + decltype(detail::swap_adl_tests::can_swap(0))::value && + (!decltype( + detail::swap_adl_tests::uses_std(0))::value || + is_swappable::value)> {}; + +template +struct is_nothrow_swappable + : std::integral_constant< + bool, + is_swappable::value && + ((decltype(detail::swap_adl_tests::uses_std(0))::value + &&detail::swap_adl_tests::is_std_swap_noexcept::value) || + (!decltype(detail::swap_adl_tests::uses_std(0))::value && + detail::swap_adl_tests::is_adl_swap_noexcept::value))> { +}; +#endif + +// The storage base manages the actual storage, and correctly propagates +// trivial destruction from T. This case is for when T is not trivially +// destructible. +template ::value> +struct optional_storage_base { + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional_storage_base() noexcept + : m_dummy(), m_has_value(false) {} + + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional_storage_base(in_place_t, U &&... 
u) + : m_value(std::forward(u)...), m_has_value(true) {} + + __thrust_exec_check_disable__ + __host__ __device__ + ~optional_storage_base() { + if (m_has_value) { + m_value.~T(); + m_has_value = false; + } + } + + struct dummy {}; + union { + dummy m_dummy; + T m_value; + }; + + bool m_has_value; +}; + +// This case is for when T is trivially destructible. +template struct optional_storage_base { + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional_storage_base() noexcept + : m_dummy(), m_has_value(false) {} + + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional_storage_base(in_place_t, U &&... u) + : m_value(std::forward(u)...), m_has_value(true) {} + + // No destructor, so this class is trivially destructible + + struct dummy {}; + union { + dummy m_dummy; + T m_value; + }; + + bool m_has_value = false; +}; + +// This base class provides some handy member functions which can be used in +// further derived classes +template struct optional_operations_base : optional_storage_base { + using optional_storage_base::optional_storage_base; + + __thrust_exec_check_disable__ + __host__ __device__ + void hard_reset() noexcept { + get().~T(); + this->m_has_value = false; + } + + __thrust_exec_check_disable__ + template + __host__ __device__ + void construct(Args &&... 
args) noexcept { + new (thrust::addressof(this->m_value)) T(std::forward(args)...); + this->m_has_value = true; + } + + __thrust_exec_check_disable__ + template + __host__ __device__ + void assign(Opt &&rhs) { + if (this->has_value()) { + if (rhs.has_value()) { + this->m_value = std::forward(rhs).get(); + } else { + this->m_value.~T(); + this->m_has_value = false; + } + } + + if (rhs.has_value()) { + construct(std::forward(rhs).get()); + } + } + + __thrust_exec_check_disable__ + __host__ __device__ + bool has_value() const { return this->m_has_value; } + + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T &get() & { return this->m_value; } + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR const T &get() const & { return this->m_value; } + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T &&get() && { return std::move(this->m_value); } +#ifndef THRUST_OPTIONAL_NO_CONSTRR + __thrust_exec_check_disable__ + __host__ __device__ + constexpr const T &&get() const && { return std::move(this->m_value); } +#endif +}; + +// This class manages conditionally having a trivial copy constructor +// This specialization is for when T is trivially copy constructible +template +struct optional_copy_base : optional_operations_base { + using optional_operations_base::optional_operations_base; +}; + +// This specialization is for when T is not trivially copy constructible +template +struct optional_copy_base : optional_operations_base { + using optional_operations_base::optional_operations_base; + + __thrust_exec_check_disable__ + optional_copy_base() = default; + __thrust_exec_check_disable__ + __host__ __device__ + optional_copy_base(const optional_copy_base &rhs) { + if (rhs.has_value()) { + this->construct(rhs.get()); + } else { + this->m_has_value = false; + } + } + + __thrust_exec_check_disable__ + optional_copy_base(optional_copy_base &&rhs) = default; + 
__thrust_exec_check_disable__ + optional_copy_base &operator=(const optional_copy_base &rhs) = default; + __thrust_exec_check_disable__ + optional_copy_base &operator=(optional_copy_base &&rhs) = default; +}; + +template +struct optional_move_base : optional_copy_base { + using optional_copy_base::optional_copy_base; +}; +template struct optional_move_base : optional_copy_base { + using optional_copy_base::optional_copy_base; + + __thrust_exec_check_disable__ + optional_move_base() = default; + __thrust_exec_check_disable__ + optional_move_base(const optional_move_base &rhs) = default; + + __thrust_exec_check_disable__ + __host__ __device__ + optional_move_base(optional_move_base &&rhs) noexcept( + std::is_nothrow_move_constructible::value) { + if (rhs.has_value()) { + this->construct(std::move(rhs.get())); + } else { + this->m_has_value = false; + } + } + __thrust_exec_check_disable__ + optional_move_base &operator=(const optional_move_base &rhs) = default; + __thrust_exec_check_disable__ + optional_move_base &operator=(optional_move_base &&rhs) = default; +}; + +// This class manages conditionally having a trivial copy assignment operator +template +struct optional_copy_assign_base : optional_move_base { + using optional_move_base::optional_move_base; +}; + +template +struct optional_copy_assign_base : optional_move_base { + using optional_move_base::optional_move_base; + + __thrust_exec_check_disable__ + optional_copy_assign_base() = default; + __thrust_exec_check_disable__ + optional_copy_assign_base(const optional_copy_assign_base &rhs) = default; + + __thrust_exec_check_disable__ + optional_copy_assign_base(optional_copy_assign_base &&rhs) = default; + __thrust_exec_check_disable__ + __host__ __device__ + optional_copy_assign_base &operator=(const optional_copy_assign_base &rhs) { + this->assign(rhs); + return *this; + } + __thrust_exec_check_disable__ + optional_copy_assign_base & + operator=(optional_copy_assign_base &&rhs) = default; +}; + +template 
+struct optional_move_assign_base : optional_copy_assign_base { + using optional_copy_assign_base::optional_copy_assign_base; +}; + +template +struct optional_move_assign_base : optional_copy_assign_base { + using optional_copy_assign_base::optional_copy_assign_base; + + __thrust_exec_check_disable__ + optional_move_assign_base() = default; + __thrust_exec_check_disable__ + optional_move_assign_base(const optional_move_assign_base &rhs) = default; + + __thrust_exec_check_disable__ + optional_move_assign_base(optional_move_assign_base &&rhs) = default; + + __thrust_exec_check_disable__ + optional_move_assign_base & + operator=(const optional_move_assign_base &rhs) = default; + + __thrust_exec_check_disable__ + __host__ __device__ + optional_move_assign_base & + operator=(optional_move_assign_base &&rhs) noexcept( + std::is_nothrow_move_constructible::value + &&std::is_nothrow_move_assignable::value) { + this->assign(std::move(rhs)); + return *this; + } +}; + +// optional_delete_ctor_base will conditionally delete copy and move +// constructors depending on whether T is copy/move constructible +template ::value, + bool EnableMove = std::is_move_constructible::value> +struct optional_delete_ctor_base { + __thrust_exec_check_disable__ + optional_delete_ctor_base() = default; + __thrust_exec_check_disable__ + optional_delete_ctor_base(const optional_delete_ctor_base &) = default; + __thrust_exec_check_disable__ + optional_delete_ctor_base(optional_delete_ctor_base &&) noexcept = default; + __thrust_exec_check_disable__ + optional_delete_ctor_base & + operator=(const optional_delete_ctor_base &) = default; + __thrust_exec_check_disable__ + optional_delete_ctor_base & + operator=(optional_delete_ctor_base &&) noexcept = default; +}; + +template struct optional_delete_ctor_base { + __thrust_exec_check_disable__ + optional_delete_ctor_base() = default; + __thrust_exec_check_disable__ + optional_delete_ctor_base(const optional_delete_ctor_base &) = default; + 
__thrust_exec_check_disable__ + optional_delete_ctor_base(optional_delete_ctor_base &&) noexcept = delete; + __thrust_exec_check_disable__ + optional_delete_ctor_base & + operator=(const optional_delete_ctor_base &) = default; + __thrust_exec_check_disable__ + optional_delete_ctor_base & + operator=(optional_delete_ctor_base &&) noexcept = default; +}; + +template struct optional_delete_ctor_base { + __thrust_exec_check_disable__ + optional_delete_ctor_base() = default; + __thrust_exec_check_disable__ + optional_delete_ctor_base(const optional_delete_ctor_base &) = delete; + __thrust_exec_check_disable__ + optional_delete_ctor_base(optional_delete_ctor_base &&) noexcept = default; + __thrust_exec_check_disable__ + optional_delete_ctor_base & + operator=(const optional_delete_ctor_base &) = default; + __thrust_exec_check_disable__ + optional_delete_ctor_base & + operator=(optional_delete_ctor_base &&) noexcept = default; +}; + +template struct optional_delete_ctor_base { + __thrust_exec_check_disable__ + optional_delete_ctor_base() = default; + __thrust_exec_check_disable__ + optional_delete_ctor_base(const optional_delete_ctor_base &) = delete; + __thrust_exec_check_disable__ + optional_delete_ctor_base(optional_delete_ctor_base &&) noexcept = delete; + __thrust_exec_check_disable__ + optional_delete_ctor_base & + operator=(const optional_delete_ctor_base &) = default; + __thrust_exec_check_disable__ + optional_delete_ctor_base & + operator=(optional_delete_ctor_base &&) noexcept = default; +}; + +// optional_delete_assign_base will conditionally delete copy and move +// constructors depending on whether T is copy/move constructible + assignable +template ::value && + std::is_copy_assignable::value), + bool EnableMove = (std::is_move_constructible::value && + std::is_move_assignable::value)> +struct optional_delete_assign_base { + __thrust_exec_check_disable__ + optional_delete_assign_base() = default; + __thrust_exec_check_disable__ + 
optional_delete_assign_base(const optional_delete_assign_base &) = default; + __thrust_exec_check_disable__ + optional_delete_assign_base(optional_delete_assign_base &&) noexcept = + default; + __thrust_exec_check_disable__ + optional_delete_assign_base & + operator=(const optional_delete_assign_base &) = default; + __thrust_exec_check_disable__ + optional_delete_assign_base & + operator=(optional_delete_assign_base &&) noexcept = default; +}; + +template struct optional_delete_assign_base { + __thrust_exec_check_disable__ + optional_delete_assign_base() = default; + __thrust_exec_check_disable__ + optional_delete_assign_base(const optional_delete_assign_base &) = default; + __thrust_exec_check_disable__ + optional_delete_assign_base(optional_delete_assign_base &&) noexcept = + default; + __thrust_exec_check_disable__ + optional_delete_assign_base & + operator=(const optional_delete_assign_base &) = default; + __thrust_exec_check_disable__ + optional_delete_assign_base & + operator=(optional_delete_assign_base &&) noexcept = delete; +}; + +template struct optional_delete_assign_base { + __thrust_exec_check_disable__ + optional_delete_assign_base() = default; + __thrust_exec_check_disable__ + optional_delete_assign_base(const optional_delete_assign_base &) = default; + __thrust_exec_check_disable__ + optional_delete_assign_base(optional_delete_assign_base &&) noexcept = + default; + __thrust_exec_check_disable__ + optional_delete_assign_base & + operator=(const optional_delete_assign_base &) = delete; + __thrust_exec_check_disable__ + optional_delete_assign_base & + operator=(optional_delete_assign_base &&) noexcept = default; +}; + +template struct optional_delete_assign_base { + __thrust_exec_check_disable__ + optional_delete_assign_base() = default; + __thrust_exec_check_disable__ + optional_delete_assign_base(const optional_delete_assign_base &) = default; + __thrust_exec_check_disable__ + optional_delete_assign_base(optional_delete_assign_base &&) noexcept = + 
default; + __thrust_exec_check_disable__ + optional_delete_assign_base & + operator=(const optional_delete_assign_base &) = delete; + __thrust_exec_check_disable__ + optional_delete_assign_base & + operator=(optional_delete_assign_base &&) noexcept = delete; +}; + +} // namespace detail + +/// \brief A tag type to represent an empty optional +struct nullopt_t { + struct do_not_use {}; + __host__ __device__ + constexpr explicit nullopt_t(do_not_use, do_not_use) noexcept {} +}; +/// \brief Represents an empty optional +/// \synopsis static constexpr nullopt_t nullopt; +/// +/// *Examples*: +/// ``` +/// thrust::optional a = thrust::nullopt; +/// void foo (thrust::optional); +/// foo(thrust::nullopt); //pass an empty optional +/// ``` +static constexpr nullopt_t nullopt{nullopt_t::do_not_use{}, + nullopt_t::do_not_use{}}; + +class bad_optional_access : public std::exception { +public: + bad_optional_access() = default; + __host__ + const char *what() const noexcept { return "Optional has no value"; } +}; + +/// An optional object is an object that contains the storage for another +/// object and manages the lifetime of this contained object, if any. The +/// contained object may be initialized after the optional object has been +/// initialized, and may be destroyed before the optional object has been +/// destroyed. The initialization state of the contained object is tracked by +/// the optional object. +template +class optional : private detail::optional_move_assign_base, + private detail::optional_delete_ctor_base, + private detail::optional_delete_assign_base { + using base = detail::optional_move_assign_base; + + static_assert(!std::is_same::value, + "instantiation of optional with in_place_t is ill-formed"); + static_assert(!std::is_same, nullopt_t>::value, + "instantiation of optional with nullopt_t is ill-formed"); + +public: +// The different versions for C++14 and 11 are needed because deduced return +// types are not SFINAE-safe. 
This provides better support for things like +// generic lambdas. C.f. +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0826r0 +#if defined(THRUST_OPTIONAL_CPP14) && !defined(THRUST_OPTIONAL_GCC49) && \ + !defined(THRUST_OPTIONAL_GCC54) && !defined(THRUST_OPTIONAL_GCC55) + /// \group and_then + /// Carries out some operation which returns an optional on the stored + /// object if there is one. \requires `std::invoke(std::forward(f), + /// value())` returns a `std::optional` for some `U`. \return Let `U` be + /// the result of `std::invoke(std::forward(f), value())`. Returns a + /// `std::optional`. The return value is empty if `*this` is empty, + /// otherwise the return value of `std::invoke(std::forward(f), value())` + /// is returned. + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) &; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR auto and_then(F &&f) & { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), **this) + : result(nullopt); + } + + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR auto and_then(F &&f) && { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), std::move(**this)) + : result(nullopt); + } + + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) const &; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr auto and_then(F &&f) const & { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? 
detail::invoke(std::forward(f), **this) + : result(nullopt); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) const &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr auto and_then(F &&f) const && { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), std::move(**this)) + : result(nullopt); + } +#endif +#else + /// \group and_then + /// Carries out some operation which returns an optional on the stored + /// object if there is one. \requires `std::invoke(std::forward(f), + /// value())` returns a `std::optional` for some `U`. + /// \return Let `U` be the result of `std::invoke(std::forward(f), + /// value())`. Returns a `std::optional`. The return value is empty if + /// `*this` is empty, otherwise the return value of + /// `std::invoke(std::forward(f), value())` is returned. + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) &; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR detail::invoke_result_t and_then(F &&f) & { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), **this) + : result(nullopt); + } + + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR detail::invoke_result_t and_then(F &&f) && { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? 
detail::invoke(std::forward(f), std::move(**this)) + : result(nullopt); + } + + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) const &; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr detail::invoke_result_t and_then(F &&f) const & { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), **this) + : result(nullopt); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) const &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr detail::invoke_result_t and_then(F &&f) const && { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), std::move(**this)) + : result(nullopt); + } +#endif +#endif + +#if defined(THRUST_OPTIONAL_CPP14) && !defined(THRUST_OPTIONAL_GCC49) && \ + !defined(THRUST_OPTIONAL_GCC54) && !defined(THRUST_OPTIONAL_GCC55) + /// \brief Carries out some operation on the stored object if there is one. + /// \return Let `U` be the result of `std::invoke(std::forward(f), + /// value())`. Returns a `std::optional`. The return value is empty if + /// `*this` is empty, otherwise an `optional` is constructed from the + /// return value of `std::invoke(std::forward(f), value())` and is + /// returned. 
+ /// + /// \group map + /// \synopsis template constexpr auto map(F &&f) &; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR auto map(F &&f) & { + return optional_map_impl(*this, std::forward(f)); + } + + /// \group map + /// \synopsis template constexpr auto map(F &&f) &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR auto map(F &&f) && { + return optional_map_impl(std::move(*this), std::forward(f)); + } + + /// \group map + /// \synopsis template constexpr auto map(F &&f) const&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr auto map(F &&f) const & { + return optional_map_impl(*this, std::forward(f)); + } + + /// \group map + /// \synopsis template constexpr auto map(F &&f) const&&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr auto map(F &&f) const && { + return optional_map_impl(std::move(*this), std::forward(f)); + } +#else + /// \brief Carries out some operation on the stored object if there is one. + /// \return Let `U` be the result of `std::invoke(std::forward(f), + /// value())`. Returns a `std::optional`. The return value is empty if + /// `*this` is empty, otherwise an `optional` is constructed from the + /// return value of `std::invoke(std::forward(f), value())` and is + /// returned. 
+ /// + /// \group map + /// \synopsis template auto map(F &&f) &; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR decltype(optional_map_impl(std::declval(), + std::declval())) + map(F &&f) & { + return optional_map_impl(*this, std::forward(f)); + } + + /// \group map + /// \synopsis template auto map(F &&f) &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR decltype(optional_map_impl(std::declval(), + std::declval())) + map(F &&f) && { + return optional_map_impl(std::move(*this), std::forward(f)); + } + + /// \group map + /// \synopsis template auto map(F &&f) const&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr decltype(optional_map_impl(std::declval(), + std::declval())) + map(F &&f) const & { + return optional_map_impl(*this, std::forward(f)); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group map + /// \synopsis template auto map(F &&f) const&&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr decltype(optional_map_impl(std::declval(), + std::declval())) + map(F &&f) const && { + return optional_map_impl(std::move(*this), std::forward(f)); + } +#endif +#endif + + /// \brief Calls `f` if the optional is empty + /// \requires `std::invoke_result_t` must be void or convertible to + /// `optional`. + /// \effects If `*this` has a value, returns `*this`. + /// Otherwise, if `f` returns `void`, calls `std::forward(f)` and returns + /// `std::nullopt`. Otherwise, returns `std::forward(f)()`. 
+ /// + /// \group or_else + /// \synopsis template optional or_else (F &&f) &; + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional THRUST_OPTIONAL_CPP11_CONSTEXPR or_else(F &&f) & { + if (has_value()) + return *this; + + std::forward(f)(); + return nullopt; + } + + /// \exclude + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional THRUST_OPTIONAL_CPP11_CONSTEXPR or_else(F &&f) & { + return has_value() ? *this : std::forward(f)(); + } + + /// \group or_else + /// \synopsis template optional or_else (F &&f) &&; + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional or_else(F &&f) && { + if (has_value()) + return std::move(*this); + + std::forward(f)(); + return nullopt; + } + + /// \exclude + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional THRUST_OPTIONAL_CPP11_CONSTEXPR or_else(F &&f) && { + return has_value() ? std::move(*this) : std::forward(f)(); + } + + /// \group or_else + /// \synopsis template optional or_else (F &&f) const &; + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional or_else(F &&f) const & { + if (has_value()) + return *this; + + std::forward(f)(); + return nullopt; + } + + /// \exclude + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional THRUST_OPTIONAL_CPP11_CONSTEXPR or_else(F &&f) const & { + return has_value() ? *this : std::forward(f)(); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \exclude + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional or_else(F &&f) const && { + if (has_value()) + return std::move(*this); + + std::forward(f)(); + return nullopt; + } + + /// \exclude + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional or_else(F &&f) const && { + return has_value() ? 
std::move(*this) : std::forward(f)(); + } +#endif + + /// \brief Maps the stored value with `f` if there is one, otherwise returns + /// `u`. + /// + /// \details If there is a value stored, then `f` is called with `**this` + /// and the value is returned. Otherwise `u` is returned. + /// + /// \group map_or + __thrust_exec_check_disable__ + template + __host__ __device__ + U map_or(F &&f, U &&u) & { + return has_value() ? detail::invoke(std::forward(f), **this) + : std::forward(u); + } + + /// \group map_or + __thrust_exec_check_disable__ + template + __host__ __device__ + U map_or(F &&f, U &&u) && { + return has_value() ? detail::invoke(std::forward(f), std::move(**this)) + : std::forward(u); + } + + /// \group map_or + __thrust_exec_check_disable__ + template + __host__ __device__ + U map_or(F &&f, U &&u) const & { + return has_value() ? detail::invoke(std::forward(f), **this) + : std::forward(u); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group map_or + __thrust_exec_check_disable__ + template + __host__ __device__ + U map_or(F &&f, U &&u) const && { + return has_value() ? detail::invoke(std::forward(f), std::move(**this)) + : std::forward(u); + } +#endif + + /// \brief Maps the stored value with `f` if there is one, otherwise calls + /// `u` and returns the result. + /// + /// \details If there is a value stored, then `f` is + /// called with `**this` and the value is returned. Otherwise + /// `std::forward(u)()` is returned. + /// + /// \group map_or_else + /// \synopsis template \nauto map_or_else(F &&f, U &&u) &; + __thrust_exec_check_disable__ + template + __host__ __device__ + detail::invoke_result_t map_or_else(F &&f, U &&u) & { + return has_value() ? 
detail::invoke(std::forward(f), **this) + : std::forward(u)(); + } + + /// \group map_or_else + /// \synopsis template \nauto map_or_else(F &&f, U &&u) + /// &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + detail::invoke_result_t map_or_else(F &&f, U &&u) && { + return has_value() ? detail::invoke(std::forward(f), std::move(**this)) + : std::forward(u)(); + } + + /// \group map_or_else + /// \synopsis template \nauto map_or_else(F &&f, U &&u) + /// const &; + __thrust_exec_check_disable__ + template + __host__ __device__ + detail::invoke_result_t map_or_else(F &&f, U &&u) const & { + return has_value() ? detail::invoke(std::forward(f), **this) + : std::forward(u)(); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group map_or_else + /// \synopsis template \nauto map_or_else(F &&f, U &&u) + /// const &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + detail::invoke_result_t map_or_else(F &&f, U &&u) const && { + return has_value() ? detail::invoke(std::forward(f), std::move(**this)) + : std::forward(u)(); + } +#endif + + /// \return `u` if `*this` has a value, otherwise an empty optional. + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr optional::type> conjunction(U &&u) const { + using result = optional>; + return has_value() ? result{u} : result{nullopt}; + } + + /// \return `rhs` if `*this` is empty, otherwise the current value. + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional disjunction(const optional &rhs) & { + return has_value() ? *this : rhs; + } + + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + constexpr optional disjunction(const optional &rhs) const & { + return has_value() ? *this : rhs; + } + + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional disjunction(const optional &rhs) && { + return has_value() ? 
std::move(*this) : rhs; + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + constexpr optional disjunction(const optional &rhs) const && { + return has_value() ? std::move(*this) : rhs; + } +#endif + + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional disjunction(optional &&rhs) & { + return has_value() ? *this : std::move(rhs); + } + + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + constexpr optional disjunction(optional &&rhs) const & { + return has_value() ? *this : std::move(rhs); + } + + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional disjunction(optional &&rhs) && { + return has_value() ? std::move(*this) : std::move(rhs); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + constexpr optional disjunction(optional &&rhs) const && { + return has_value() ? std::move(*this) : std::move(rhs); + } +#endif + + /// Takes the value out of the optional, leaving it empty + /// \group take + __thrust_exec_check_disable__ + __host__ __device__ + optional take() & { + optional ret = *this; + reset(); + return ret; + } + + /// \group take + __thrust_exec_check_disable__ + __host__ __device__ + optional take() const & { + optional ret = *this; + reset(); + return ret; + } + + /// \group take + __thrust_exec_check_disable__ + __host__ __device__ + optional take() && { + optional ret = std::move(*this); + reset(); + return ret; + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group take + __thrust_exec_check_disable__ + __host__ __device__ + optional take() const && { + optional ret = std::move(*this); + reset(); + return ret; + } +#endif + + using value_type = T; + + /// Constructs an optional that does not contain a value. 
+ /// \group ctor_empty + __thrust_exec_check_disable__ + constexpr optional() noexcept = default; + + /// \group ctor_empty + __thrust_exec_check_disable__ + __host__ __device__ + constexpr optional(nullopt_t) noexcept {} + + /// Copy constructor + /// + /// If `rhs` contains a value, the stored value is direct-initialized with + /// it. Otherwise, the constructed optional is empty. + __thrust_exec_check_disable__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional(const optional &rhs) = default; + + /// Move constructor + /// + /// If `rhs` contains a value, the stored value is direct-initialized with + /// it. Otherwise, the constructed optional is empty. + __thrust_exec_check_disable__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional(optional &&rhs) = default; + + /// Constructs the stored value in-place using the given arguments. + /// \group in_place + /// \synopsis template constexpr explicit optional(in_place_t, Args&&... args); + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr explicit optional( + detail::enable_if_t::value, in_place_t>, + Args &&... args) + : base(in_place, std::forward(args)...) {} + + /// \group in_place + /// \synopsis template \nconstexpr explicit optional(in_place_t, std::initializer_list&, Args&&... args); + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR explicit optional( + detail::enable_if_t &, + Args &&...>::value, + in_place_t>, + std::initializer_list il, Args &&... args) { + this->construct(il, std::forward(args)...); + } + + /// Constructs the stored value with `u`. 
+ /// \synopsis template constexpr optional(U &&u); + __thrust_exec_check_disable__ + template < + class U = T, + detail::enable_if_t::value> * = nullptr, + detail::enable_forward_value * = nullptr> + __host__ __device__ + constexpr optional(U &&u) : base(in_place, std::forward(u)) {} + + /// \exclude + __thrust_exec_check_disable__ + template < + class U = T, + detail::enable_if_t::value> * = nullptr, + detail::enable_forward_value * = nullptr> + __host__ __device__ + constexpr explicit optional(U &&u) : base(in_place, std::forward(u)) {} + + /// Converting copy constructor. + /// \synopsis template optional(const optional &rhs); + __thrust_exec_check_disable__ + template < + class U, detail::enable_from_other * = nullptr, + detail::enable_if_t::value> * = nullptr> + __host__ __device__ + optional(const optional &rhs) { + this->construct(*rhs); + } + + /// \exclude + __thrust_exec_check_disable__ + template * = nullptr, + detail::enable_if_t::value> * = + nullptr> + __host__ __device__ + explicit optional(const optional &rhs) { + this->construct(*rhs); + } + + /// Converting move constructor. + /// \synopsis template optional(optional &&rhs); + __thrust_exec_check_disable__ + template < + class U, detail::enable_from_other * = nullptr, + detail::enable_if_t::value> * = nullptr> + __host__ __device__ + optional(optional &&rhs) { + this->construct(std::move(*rhs)); + } + + /// \exclude + __thrust_exec_check_disable__ + template < + class U, detail::enable_from_other * = nullptr, + detail::enable_if_t::value> * = nullptr> + __host__ __device__ + explicit optional(optional &&rhs) { + this->construct(std::move(*rhs)); + } + + /// Destroys the stored value if there is one. + __thrust_exec_check_disable__ + ~optional() = default; + + /// Assignment to empty. + /// + /// Destroys the current value if there is one. 
+ __thrust_exec_check_disable__ + __host__ __device__ + optional &operator=(nullopt_t) noexcept { + if (has_value()) { + this->m_value.~T(); + this->m_has_value = false; + } + + return *this; + } + + /// Copy assignment. + /// + /// Copies the value from `rhs` if there is one. Otherwise resets the stored + /// value in `*this`. + __thrust_exec_check_disable__ + optional &operator=(const optional &rhs) = default; + + /// Move assignment. + /// + /// Moves the value from `rhs` if there is one. Otherwise resets the stored + /// value in `*this`. + __thrust_exec_check_disable__ + optional &operator=(optional &&rhs) = default; + + /// Assigns the stored value from `u`, destroying the old value if there was + /// one. + /// \synopsis optional &operator=(U &&u); + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional &operator=(U &&u) { + if (has_value()) { + this->m_value = std::forward(u); + } else { + this->construct(std::forward(u)); + } + + return *this; + } + + /// Converting copy assignment operator. + /// + /// Copies the value from `rhs` if there is one. Otherwise resets the stored + /// value in `*this`. + /// \synopsis optional &operator=(const optional & rhs); + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional &operator=(const optional &rhs) { + if (has_value()) { + if (rhs.has_value()) { + this->m_value = *rhs; + } else { + this->hard_reset(); + } + } + + if (rhs.has_value()) { + this->construct(*rhs); + } + + return *this; + } + + // TODO check exception guarantee + /// Converting move assignment operator. + /// + /// Moves the value from `rhs` if there is one. Otherwise resets the stored + /// value in `*this`. 
+ /// \synopsis optional &operator=(optional && rhs); + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional &operator=(optional &&rhs) { + if (has_value()) { + if (rhs.has_value()) { + this->m_value = std::move(*rhs); + } else { + this->hard_reset(); + } + } + + if (rhs.has_value()) { + this->construct(std::move(*rhs)); + } + + return *this; + } + + /// Constructs the value in-place, destroying the current one if there is + /// one. + /// \group emplace + __thrust_exec_check_disable__ + template + __host__ __device__ + T &emplace(Args &&... args) { + static_assert(std::is_constructible::value, + "T must be constructible with Args"); + + *this = nullopt; + this->construct(std::forward(args)...); + return this->m_value; + } + + /// \group emplace + /// \synopsis template \nT& emplace(std::initializer_list il, Args &&... args); + __thrust_exec_check_disable__ + template + __host__ __device__ + detail::enable_if_t< + std::is_constructible &, Args &&...>::value, + T &> + emplace(std::initializer_list il, Args &&... args) { + *this = nullopt; + this->construct(il, std::forward(args)...); + return this->m_value; + } + + /// Swaps this optional with the other. + /// + /// If neither optionals have a value, nothing happens. + /// If both have a value, the values are swapped. + /// If one has a value, it is moved to the other and the movee is left + /// valueless. 
+ __thrust_exec_check_disable__ + __host__ __device__ + void + swap(optional &rhs) noexcept(std::is_nothrow_move_constructible::value + &&detail::is_nothrow_swappable::value) { + if (has_value()) { + if (rhs.has_value()) { + using thrust::swap; + swap(**this, *rhs); + } else { + new (addressof(rhs.m_value)) T(std::move(this->m_value)); + this->m_value.T::~T(); + } + } else if (rhs.has_value()) { + new (addressof(this->m_value)) T(std::move(rhs.m_value)); + rhs.m_value.T::~T(); + } + } + + /// \return a pointer to the stored value + /// \requires a value is stored + /// \group pointer + /// \synopsis constexpr const T *operator->() const; + __thrust_exec_check_disable__ + __host__ __device__ + constexpr const T *operator->() const { + return addressof(this->m_value); + } + + /// \group pointer + /// \synopsis constexpr T *operator->(); + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T *operator->() { + return addressof(this->m_value); + } + + /// \return the stored value + /// \requires a value is stored + /// \group deref + /// \synopsis constexpr T &operator*(); + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T &operator*() & { return this->m_value; } + + /// \group deref + /// \synopsis constexpr const T &operator*() const; + __thrust_exec_check_disable__ + __host__ __device__ + constexpr const T &operator*() const & { return this->m_value; } + + /// \exclude + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T &&operator*() && { + return std::move(this->m_value); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \exclude + __thrust_exec_check_disable__ + __host__ __device__ + constexpr const T &&operator*() const && { return std::move(this->m_value); } +#endif + + /// \return whether or not the optional has a value + /// \group has_value + __thrust_exec_check_disable__ + __host__ __device__ + constexpr bool has_value() const noexcept { return 
this->m_has_value; } + + /// \group has_value + __thrust_exec_check_disable__ + __host__ __device__ + constexpr explicit operator bool() const noexcept { + return this->m_has_value; + } + + /// \return the contained value if there is one, otherwise throws + /// [bad_optional_access] + /// \group value + /// \synopsis constexpr T &value(); + __host__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T &value() & { + if (has_value()) + return this->m_value; + throw bad_optional_access(); + } + /// \group value + /// \synopsis constexpr const T &value() const; + __host__ + THRUST_OPTIONAL_CPP11_CONSTEXPR const T &value() const & { + if (has_value()) + return this->m_value; + throw bad_optional_access(); + } + /// \exclude + __host__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T &&value() && { + if (has_value()) + return std::move(this->m_value); + throw bad_optional_access(); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \exclude + __host__ + THRUST_OPTIONAL_CPP11_CONSTEXPR const T &&value() const && { + if (has_value()) + return std::move(this->m_value); + throw bad_optional_access(); + } +#endif + + /// \return the stored value if there is one, otherwise returns `u` + /// \group value_or + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr T value_or(U &&u) const & { + static_assert(std::is_copy_constructible::value && + std::is_convertible::value, + "T must be copy constructible and convertible from U"); + return has_value() ? **this : static_cast(std::forward(u)); + } + + /// \group value_or + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T value_or(U &&u) && { + static_assert(std::is_move_constructible::value && + std::is_convertible::value, + "T must be move constructible and convertible from U"); + return has_value() ? 
**this : static_cast(std::forward(u)); + } + + /// Destroys the stored value if one exists, making the optional empty + __thrust_exec_check_disable__ + __host__ __device__ + void reset() noexcept { + if (has_value()) { + this->m_value.~T(); + this->m_has_value = false; + } + } +}; + +/// \group relop +/// \brief Compares two optional objects +/// \details If both optionals contain a value, they are compared with `T`s +/// relational operators. Otherwise `lhs` and `rhs` are equal only if they are +/// both empty, and `lhs` is less than `rhs` only if `rhs` is empty and `lhs` +/// is not. +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator==(const optional &lhs, + const optional &rhs) { + return lhs.has_value() == rhs.has_value() && + (!lhs.has_value() || *lhs == *rhs); +} +/// \group relop +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator!=(const optional &lhs, + const optional &rhs) { + return lhs.has_value() != rhs.has_value() || + (lhs.has_value() && *lhs != *rhs); +} +/// \group relop +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator<(const optional &lhs, + const optional &rhs) { + return rhs.has_value() && (!lhs.has_value() || *lhs < *rhs); +} +/// \group relop +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator>(const optional &lhs, + const optional &rhs) { + return lhs.has_value() && (!rhs.has_value() || *lhs > *rhs); +} +/// \group relop +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator<=(const optional &lhs, + const optional &rhs) { + return !lhs.has_value() || (rhs.has_value() && *lhs <= *rhs); +} +/// \group relop +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator>=(const optional &lhs, + const optional &rhs) { + return !rhs.has_value() || (lhs.has_value() && *lhs >= *rhs); +} + +/// \group 
relop_nullopt +/// \brief Compares an optional to a `nullopt` +/// \details Equivalent to comparing the optional to an empty optional +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator==(const optional &lhs, nullopt_t) noexcept { + return !lhs.has_value(); +} +/// \group relop_nullopt +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator==(nullopt_t, const optional &rhs) noexcept { + return !rhs.has_value(); +} +/// \group relop_nullopt +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator!=(const optional &lhs, nullopt_t) noexcept { + return lhs.has_value(); +} +/// \group relop_nullopt +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator!=(nullopt_t, const optional &rhs) noexcept { + return rhs.has_value(); +} +/// \group relop_nullopt +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator<(const optional &, nullopt_t) noexcept { + return false; +} +/// \group relop_nullopt +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator<(nullopt_t, const optional &rhs) noexcept { + return rhs.has_value(); +} +/// \group relop_nullopt +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator<=(const optional &lhs, nullopt_t) noexcept { + return !lhs.has_value(); +} +/// \group relop_nullopt +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator<=(nullopt_t, const optional &) noexcept { + return true; +} +/// \group relop_nullopt +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator>(const optional &lhs, nullopt_t) noexcept { + return lhs.has_value(); +} +/// \group relop_nullopt +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator>(nullopt_t, const optional &) noexcept { + return 
false; +} +/// \group relop_nullopt +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator>=(const optional &, nullopt_t) noexcept { + return true; +} +/// \group relop_nullopt +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator>=(nullopt_t, const optional &rhs) noexcept { + return !rhs.has_value(); +} + +/// \group relop_t +/// \brief Compares the optional with a value. +/// \details If the optional has a value, it is compared with the other value +/// using `T`s relational operators. Otherwise, the optional is considered +/// less than the value. +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator==(const optional &lhs, const U &rhs) { + return lhs.has_value() ? *lhs == rhs : false; +} +/// \group relop_t +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator==(const U &lhs, const optional &rhs) { + return rhs.has_value() ? lhs == *rhs : false; +} +/// \group relop_t +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator!=(const optional &lhs, const U &rhs) { + return lhs.has_value() ? *lhs != rhs : true; +} +/// \group relop_t +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator!=(const U &lhs, const optional &rhs) { + return rhs.has_value() ? lhs != *rhs : true; +} +/// \group relop_t +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator<(const optional &lhs, const U &rhs) { + return lhs.has_value() ? *lhs < rhs : true; +} +/// \group relop_t +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator<(const U &lhs, const optional &rhs) { + return rhs.has_value() ? 
lhs < *rhs : false; +} +/// \group relop_t +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator<=(const optional &lhs, const U &rhs) { + return lhs.has_value() ? *lhs <= rhs : true; +} +/// \group relop_t +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator<=(const U &lhs, const optional &rhs) { + return rhs.has_value() ? lhs <= *rhs : false; +} +/// \group relop_t +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator>(const optional &lhs, const U &rhs) { + return lhs.has_value() ? *lhs > rhs : false; +} +/// \group relop_t +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator>(const U &lhs, const optional &rhs) { + return rhs.has_value() ? lhs > *rhs : true; +} +/// \group relop_t +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator>=(const optional &lhs, const U &rhs) { + return lhs.has_value() ? *lhs >= rhs : false; +} +/// \group relop_t +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr bool operator>=(const U &lhs, const optional &rhs) { + return rhs.has_value() ? lhs >= *rhs : true; +} + +/// \synopsis template \nvoid swap(optional &lhs, optional &rhs); +__thrust_exec_check_disable__ +template ::value> * = nullptr, + detail::enable_if_t::value> * = nullptr> +__host__ __device__ +void swap(optional &lhs, + optional &rhs) noexcept(noexcept(lhs.swap(rhs))) { + return lhs.swap(rhs); +} + +namespace detail { +struct i_am_secret {}; +} // namespace detail + +__thrust_exec_check_disable__ +template ::value, + detail::decay_t, T>> +__host__ __device__ +inline constexpr optional make_optional(U &&v) { + return optional(std::forward(v)); +} + +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr optional make_optional(Args &&... 
args) { + return optional(in_place, std::forward(args)...); +} +__thrust_exec_check_disable__ +template +__host__ __device__ +inline constexpr optional make_optional(std::initializer_list il, + Args &&... args) { + return optional(in_place, il, std::forward(args)...); +} + +#if THRUST_CPP_DIALECT >= 2017 +template optional(T)->optional; +#endif + +// Doxygen chokes on the trailing return types used below. +#if !defined(THRUST_DOXYGEN) +/// \exclude +namespace detail { +#ifdef THRUST_OPTIONAL_CPP14 +__thrust_exec_check_disable__ +template (), + *std::declval())), + detail::enable_if_t::value> * = nullptr> +__host__ __device__ +constexpr auto optional_map_impl(Opt &&opt, F &&f) { + return opt.has_value() + ? detail::invoke(std::forward(f), *std::forward(opt)) + : optional(nullopt); +} + +__thrust_exec_check_disable__ +template (), + *std::declval())), + detail::enable_if_t::value> * = nullptr> +__host__ __device__ +auto optional_map_impl(Opt &&opt, F &&f) { + if (opt.has_value()) { + detail::invoke(std::forward(f), *std::forward(opt)); + return make_optional(monostate{}); + } + + return optional(nullopt); +} +#else +__thrust_exec_check_disable__ +template (), + *std::declval())), + detail::enable_if_t::value> * = nullptr> +__host__ __device__ +constexpr optional optional_map_impl(Opt &&opt, F &&f) { + return opt.has_value() + ? detail::invoke(std::forward(f), *std::forward(opt)) + : optional(nullopt); +} + +__thrust_exec_check_disable__ +template (), + *std::declval())), + detail::enable_if_t::value> * = nullptr> +__host__ __device__ +auto optional_map_impl(Opt &&opt, F &&f) -> optional +{ + if (opt.has_value()) { + detail::invoke(std::forward(f), *std::forward(opt)); + return monostate{}; + } + + return nullopt; +} +#endif +} // namespace detail +#endif // !defined(THRUST_DOXYGEN) + +/// Specialization for when `T` is a reference. `optional` acts similarly +/// to a `T*`, but provides more operations and shows intent more clearly. 
+/// +/// *Examples*: +/// +/// ``` +/// int i = 42; +/// thrust::optional o = i; +/// *o == 42; //true +/// i = 12; +/// *o = 12; //true +/// &*o == &i; //true +/// ``` +/// +/// Assignment has rebind semantics rather than assign-through semantics: +/// +/// ``` +/// int j = 8; +/// o = j; +/// +/// &*o == &j; //true +/// ``` +template class optional { +public: +// The different versions for C++14 and 11 are needed because deduced return +// types are not SFINAE-safe. This provides better support for things like +// generic lambdas. C.f. +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0826r0 +#if defined(THRUST_OPTIONAL_CPP14) && !defined(THRUST_OPTIONAL_GCC49) && \ + !defined(THRUST_OPTIONAL_GCC54) && !defined(THRUST_OPTIONAL_GCC55) + /// \group and_then + /// Carries out some operation which returns an optional on the stored + /// object if there is one. \requires `std::invoke(std::forward(f), + /// value())` returns a `std::optional` for some `U`. \return Let `U` be + /// the result of `std::invoke(std::forward(f), value())`. Returns a + /// `std::optional`. The return value is empty if `*this` is empty, + /// otherwise the return value of `std::invoke(std::forward(f), value())` + /// is returned. + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) &; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR auto and_then(F &&f) & { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? 
detail::invoke(std::forward(f), **this) + : result(nullopt); + } + + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR auto and_then(F &&f) && { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), **this) + : result(nullopt); + } + + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) const &; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr auto and_then(F &&f) const & { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), **this) + : result(nullopt); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) const &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr auto and_then(F &&f) const && { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), **this) + : result(nullopt); + } +#endif +#else + /// \group and_then + /// Carries out some operation which returns an optional on the stored + /// object if there is one. \requires `std::invoke(std::forward(f), + /// value())` returns a `std::optional` for some `U`. \return Let `U` be + /// the result of `std::invoke(std::forward(f), value())`. Returns a + /// `std::optional`. The return value is empty if `*this` is empty, + /// otherwise the return value of `std::invoke(std::forward(f), value())` + /// is returned. 
+ /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) &; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR detail::invoke_result_t and_then(F &&f) & { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), **this) + : result(nullopt); + } + + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR detail::invoke_result_t and_then(F &&f) && { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), **this) + : result(nullopt); + } + + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) const &; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr detail::invoke_result_t and_then(F &&f) const & { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? detail::invoke(std::forward(f), **this) + : result(nullopt); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group and_then + /// \synopsis template \nconstexpr auto and_then(F &&f) const &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr detail::invoke_result_t and_then(F &&f) const && { + using result = detail::invoke_result_t; + static_assert(detail::is_optional::value, + "F must return an optional"); + + return has_value() ? 
detail::invoke(std::forward(f), **this) + : result(nullopt); + } +#endif +#endif + +#if defined(THRUST_OPTIONAL_CPP14) && !defined(THRUST_OPTIONAL_GCC49) && \ + !defined(THRUST_OPTIONAL_GCC54) && !defined(THRUST_OPTIONAL_GCC55) + /// \brief Carries out some operation on the stored object if there is one. + /// \return Let `U` be the result of `std::invoke(std::forward(f), + /// value())`. Returns a `std::optional`. The return value is empty if + /// `*this` is empty, otherwise an `optional` is constructed from the + /// return value of `std::invoke(std::forward(f), value())` and is + /// returned. + /// + /// \group map + /// \synopsis template constexpr auto map(F &&f) &; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR auto map(F &&f) & { + return detail::optional_map_impl(*this, std::forward(f)); + } + + /// \group map + /// \synopsis template constexpr auto map(F &&f) &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR auto map(F &&f) && { + return detail::optional_map_impl(std::move(*this), std::forward(f)); + } + + /// \group map + /// \synopsis template constexpr auto map(F &&f) const&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr auto map(F &&f) const & { + return detail::optional_map_impl(*this, std::forward(f)); + } + + /// \group map + /// \synopsis template constexpr auto map(F &&f) const&&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr auto map(F &&f) const && { + return detail::optional_map_impl(std::move(*this), std::forward(f)); + } +#else + /// \brief Carries out some operation on the stored object if there is one. + /// \return Let `U` be the result of `std::invoke(std::forward(f), + /// value())`. Returns a `std::optional`. 
The return value is empty if + /// `*this` is empty, otherwise an `optional` is constructed from the + /// return value of `std::invoke(std::forward(f), value())` and is + /// returned. + /// + /// \group map + /// \synopsis template auto map(F &&f) &; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR decltype(detail::optional_map_impl(std::declval(), + std::declval())) + map(F &&f) & { + return detail::optional_map_impl(*this, std::forward(f)); + } + + /// \group map + /// \synopsis template auto map(F &&f) &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR decltype(detail::optional_map_impl(std::declval(), + std::declval())) + map(F &&f) && { + return detail::optional_map_impl(std::move(*this), std::forward(f)); + } + + /// \group map + /// \synopsis template auto map(F &&f) const&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr decltype(detail::optional_map_impl(std::declval(), + std::declval())) + map(F &&f) const & { + return detail::optional_map_impl(*this, std::forward(f)); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group map + /// \synopsis template auto map(F &&f) const&&; + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr decltype(detail::optional_map_impl(std::declval(), + std::declval())) + map(F &&f) const && { + return detail::optional_map_impl(std::move(*this), std::forward(f)); + } +#endif +#endif + + /// \brief Calls `f` if the optional is empty + /// \requires `std::invoke_result_t` must be void or convertible to + /// `optional`. \effects If `*this` has a value, returns `*this`. + /// Otherwise, if `f` returns `void`, calls `std::forward(f)` and returns + /// `std::nullopt`. Otherwise, returns `std::forward(f)()`. 
+ /// + /// \group or_else + /// \synopsis template optional or_else (F &&f) &; + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional + THRUST_OPTIONAL_CPP11_CONSTEXPR or_else(F &&f) & { + if (has_value()) + return *this; + + std::forward(f)(); + return nullopt; + } + + /// \exclude + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional + THRUST_OPTIONAL_CPP11_CONSTEXPR or_else(F &&f) & { + return has_value() ? *this : std::forward(f)(); + } + + /// \group or_else + /// \synopsis template optional or_else (F &&f) &&; + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional or_else(F &&f) && { + if (has_value()) + return std::move(*this); + + std::forward(f)(); + return nullopt; + } + + /// \exclude + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional THRUST_OPTIONAL_CPP11_CONSTEXPR or_else(F &&f) && { + return has_value() ? std::move(*this) : std::forward(f)(); + } + + /// \group or_else + /// \synopsis template optional or_else (F &&f) const &; + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional or_else(F &&f) const & { + if (has_value()) + return *this; + + std::forward(f)(); + return nullopt; + } + + /// \exclude + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional THRUST_OPTIONAL_CPP11_CONSTEXPR or_else(F &&f) const & { + return has_value() ? *this : std::forward(f)(); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \exclude + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional or_else(F &&f) const && { + if (has_value()) + return std::move(*this); + + std::forward(f)(); + return nullopt; + } + + /// \exclude + __thrust_exec_check_disable__ + template * = nullptr> + __host__ __device__ + optional or_else(F &&f) const && { + return has_value() ? 
std::move(*this) : std::forward(f)(); + } +#endif + + /// \brief Maps the stored value with `f` if there is one, otherwise returns + /// `u`. + /// + /// \details If there is a value stored, then `f` is called with `**this` + /// and the value is returned. Otherwise `u` is returned. + /// + /// \group map_or + __thrust_exec_check_disable__ + template + __host__ __device__ + U map_or(F &&f, U &&u) & { + return has_value() ? detail::invoke(std::forward(f), **this) + : std::forward(u); + } + + /// \group map_or + __thrust_exec_check_disable__ + template + __host__ __device__ + U map_or(F &&f, U &&u) && { + return has_value() ? detail::invoke(std::forward(f), std::move(**this)) + : std::forward(u); + } + + /// \group map_or + __thrust_exec_check_disable__ + template + __host__ __device__ + U map_or(F &&f, U &&u) const & { + return has_value() ? detail::invoke(std::forward(f), **this) + : std::forward(u); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group map_or + __thrust_exec_check_disable__ + template + __host__ __device__ + U map_or(F &&f, U &&u) const && { + return has_value() ? detail::invoke(std::forward(f), std::move(**this)) + : std::forward(u); + } +#endif + + /// \brief Maps the stored value with `f` if there is one, otherwise calls + /// `u` and returns the result. + /// + /// \details If there is a value stored, then `f` is + /// called with `**this` and the value is returned. Otherwise + /// `std::forward(u)()` is returned. + /// + /// \group map_or_else + /// \synopsis template \nauto map_or_else(F &&f, U &&u) &; + __thrust_exec_check_disable__ + template + __host__ __device__ + detail::invoke_result_t map_or_else(F &&f, U &&u) & { + return has_value() ? 
detail::invoke(std::forward(f), **this) + : std::forward(u)(); + } + + /// \group map_or_else + /// \synopsis template \nauto map_or_else(F &&f, U &&u) + /// &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + detail::invoke_result_t map_or_else(F &&f, U &&u) && { + return has_value() ? detail::invoke(std::forward(f), std::move(**this)) + : std::forward(u)(); + } + + /// \group map_or_else + /// \synopsis template \nauto map_or_else(F &&f, U &&u) + /// const &; + __thrust_exec_check_disable__ + template + __host__ __device__ + detail::invoke_result_t map_or_else(F &&f, U &&u) const & { + return has_value() ? detail::invoke(std::forward(f), **this) + : std::forward(u)(); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group map_or_else + /// \synopsis template \nauto map_or_else(F &&f, U &&u) + /// const &&; + __thrust_exec_check_disable__ + template + __host__ __device__ + detail::invoke_result_t map_or_else(F &&f, U &&u) const && { + return has_value() ? detail::invoke(std::forward(f), std::move(**this)) + : std::forward(u)(); + } +#endif + + /// \return `u` if `*this` has a value, otherwise an empty optional. + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr optional::type> conjunction(U &&u) const { + using result = optional>; + return has_value() ? result{u} : result{nullopt}; + } + + /// \return `rhs` if `*this` is empty, otherwise the current value. + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional disjunction(const optional &rhs) & { + return has_value() ? *this : rhs; + } + + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + constexpr optional disjunction(const optional &rhs) const & { + return has_value() ? *this : rhs; + } + + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional disjunction(const optional &rhs) && { + return has_value() ? 
std::move(*this) : rhs; + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + constexpr optional disjunction(const optional &rhs) const && { + return has_value() ? std::move(*this) : rhs; + } +#endif + + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional disjunction(optional &&rhs) & { + return has_value() ? *this : std::move(rhs); + } + + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + constexpr optional disjunction(optional &&rhs) const & { + return has_value() ? *this : std::move(rhs); + } + + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional disjunction(optional &&rhs) && { + return has_value() ? std::move(*this) : std::move(rhs); + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group disjunction + __thrust_exec_check_disable__ + __host__ __device__ + constexpr optional disjunction(optional &&rhs) const && { + return has_value() ? std::move(*this) : std::move(rhs); + } +#endif + + /// Takes the value out of the optional, leaving it empty + /// \group take + __thrust_exec_check_disable__ + __host__ __device__ + optional take() & { + optional ret = *this; + reset(); + return ret; + } + + /// \group take + __thrust_exec_check_disable__ + __host__ __device__ + optional take() const & { + optional ret = *this; + reset(); + return ret; + } + + /// \group take + __thrust_exec_check_disable__ + __host__ __device__ + optional take() && { + optional ret = std::move(*this); + reset(); + return ret; + } + +#ifndef THRUST_OPTIONAL_NO_CONSTRR + /// \group take + __thrust_exec_check_disable__ + __host__ __device__ + optional take() const && { + optional ret = std::move(*this); + reset(); + return ret; + } +#endif + + using value_type = T &; + + /// Constructs an optional that does not contain a value. 
+ /// \group ctor_empty + __thrust_exec_check_disable__ + __host__ __device__ + constexpr optional() noexcept : m_value(nullptr) {} + + /// \group ctor_empty + __thrust_exec_check_disable__ + __host__ __device__ + constexpr optional(nullopt_t) noexcept : m_value(nullptr) {} + + /// Copy constructor + /// + /// If `rhs` contains a value, the stored value is direct-initialized with + /// it. Otherwise, the constructed optional is empty. + __thrust_exec_check_disable__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional(const optional &rhs) noexcept = default; + + /// Move constructor + /// + /// If `rhs` contains a value, the stored value is direct-initialized with + /// it. Otherwise, the constructed optional is empty. + __thrust_exec_check_disable__ + THRUST_OPTIONAL_CPP11_CONSTEXPR optional(optional &&rhs) = default; + + /// Constructs the stored value with `u`. + /// \synopsis template constexpr optional(U &&u); + __thrust_exec_check_disable__ + template >::value> + * = nullptr> + __host__ __device__ + constexpr optional(U &&u) : m_value(addressof(u)) { + static_assert(std::is_lvalue_reference::value, "U must be an lvalue"); + } + + /// \exclude + __thrust_exec_check_disable__ + template + __host__ __device__ + constexpr explicit optional(const optional &rhs) : optional(*rhs) {} + + /// No-op + __thrust_exec_check_disable__ + ~optional() = default; + + /// Assignment to empty. + /// + /// Destroys the current value if there is one. + __thrust_exec_check_disable__ + __host__ __device__ + optional &operator=(nullopt_t) noexcept { + m_value = nullptr; + return *this; + } + + /// Copy assignment. + /// + /// Rebinds this optional to the referee of `rhs` if there is one. Otherwise + /// resets the stored value in `*this`. + __thrust_exec_check_disable__ + optional &operator=(const optional &rhs) = default; + + /// Rebinds this optional to `u`. + /// + /// \requires `U` must be an lvalue reference. 
+ /// \synopsis optional &operator=(U &&u); + __thrust_exec_check_disable__ + template >::value> + * = nullptr> + __host__ __device__ + optional &operator=(U &&u) { + static_assert(std::is_lvalue_reference::value, "U must be an lvalue"); + m_value = addressof(u); + return *this; + } + + /// Converting copy assignment operator. + /// + /// Rebinds this optional to the referee of `rhs` if there is one. Otherwise + /// resets the stored value in `*this`. + __thrust_exec_check_disable__ + template + __host__ __device__ + optional &operator=(const optional &rhs) { + m_value = addressof(rhs.value()); + return *this; + } + + /// Constructs the value in-place, destroying the current one if there is + /// one. + /// + /// \group emplace + __thrust_exec_check_disable__ + template + __host__ __device__ + T &emplace(Args &&... args) noexcept { + static_assert(std::is_constructible::value, + "T must be constructible with Args"); + + *this = nullopt; + this->construct(std::forward(args)...); + } + + /// Swaps this optional with the other. + /// + /// If neither optionals have a value, nothing happens. + /// If both have a value, the values are swapped. + /// If one has a value, it is moved to the other and the movee is left + /// valueless. 
+ __thrust_exec_check_disable__ + __host__ __device__ + void swap(optional &rhs) noexcept { std::swap(m_value, rhs.m_value); } + + /// \return a pointer to the stored value + /// \requires a value is stored + /// \group pointer + /// \synopsis constexpr const T *operator->() const; + __thrust_exec_check_disable__ + __host__ __device__ + constexpr const T *operator->() const { return m_value; } + + /// \group pointer + /// \synopsis constexpr T *operator->(); + __thrust_exec_check_disable__ + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T *operator->() { return m_value; } + + /// \return the stored value + /// \requires a value is stored + /// \group deref + /// \synopsis constexpr T &operator*(); + __thrust_exec_check_disable__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T &operator*() { return *m_value; } + + /// \group deref + /// \synopsis constexpr const T &operator*() const; + __thrust_exec_check_disable__ + __host__ __device__ + constexpr const T &operator*() const { return *m_value; } + + /// \return whether or not the optional has a value + /// \group has_value + __thrust_exec_check_disable__ + __host__ __device__ + constexpr bool has_value() const noexcept { return m_value != nullptr; } + + /// \group has_value + __thrust_exec_check_disable__ + __host__ __device__ + constexpr explicit operator bool() const noexcept { + return m_value != nullptr; + } + + /// \return the contained value if there is one, otherwise throws + /// [bad_optional_access] + /// \group value + /// synopsis constexpr T &value(); + __host__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T &value() { + if (has_value()) + return *m_value; + throw bad_optional_access(); + } + /// \group value + /// \synopsis constexpr const T &value() const; + __host__ + THRUST_OPTIONAL_CPP11_CONSTEXPR const T &value() const { + if (has_value()) + return *m_value; + throw bad_optional_access(); + } + + /// \return the stored value if there is one, otherwise returns `u` + /// \group value_or + 
__thrust_exec_check_disable__ + template + __host__ __device__ + constexpr T value_or(U &&u) const & { + static_assert(std::is_copy_constructible::value && + std::is_convertible::value, + "T must be copy constructible and convertible from U"); + return has_value() ? **this : static_cast(std::forward(u)); + } + + /// \group value_or + __thrust_exec_check_disable__ + template + __host__ __device__ + THRUST_OPTIONAL_CPP11_CONSTEXPR T value_or(U &&u) && { + static_assert(std::is_move_constructible::value && + std::is_convertible::value, + "T must be move constructible and convertible from U"); + return has_value() ? **this : static_cast(std::forward(u)); + } + + /// Destroys the stored value if one exists, making the optional empty + __thrust_exec_check_disable__ + void reset() noexcept { m_value = nullptr; } + +private: + T *m_value; +}; + +THRUST_NAMESPACE_END + +namespace std { +// TODO SFINAE +template struct hash> { + __thrust_exec_check_disable__ + __host__ __device__ + ::std::size_t operator()(const THRUST_NS_QUALIFIER::optional &o) const { + if (!o.has_value()) + return 0; + + return std::hash>()(*o); + } +}; +} // namespace std + +#endif // THRUST_CPP_DIALECT >= 2011 + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/pair.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/pair.h new file mode 100644 index 0000000000000000000000000000000000000000..eb2138aaf2ca348c108aaeb9c423cc70b2fe0faf --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/pair.h @@ -0,0 +1,281 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file pair.h + * \brief A type encapsulating a heterogeneous pair of elements + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup utility + * \{ + */ + +/*! \addtogroup pair + * \{ + */ + +/*! \p pair is a generic data structure encapsulating a heterogeneous + * pair of values. + * + * \tparam T1 The type of \p pair's first object type. There are no + * requirements on the type of \p T1. T1's type is + * provided by pair::first_type. + * + * \tparam T2 The type of \p pair's second object type. There are no + * requirements on the type of \p T2. T2's type is + * provided by pair::second_type. + */ +template + struct pair +{ + /*! \p first_type is the type of \p pair's first object type. + */ + typedef T1 first_type; + + /*! \p second_type is the type of \p pair's second object type. + */ + typedef T2 second_type; + + /*! The \p pair's first object. + */ + first_type first; + + /*! The \p pair's second object. + */ + second_type second; + + /*! \p pair's default constructor constructs \p first + * and \p second using \c first_type & \c second_type's + * default constructors, respectively. + */ + __host__ __device__ pair(void); + + /*! This constructor accepts two objects to copy into this \p pair. + * + * \param x The object to copy into \p first. + * \param y The object to copy into \p second. + */ + inline __host__ __device__ + pair(const T1 &x, const T2 &y); + + /*! 
This copy constructor copies from a \p pair whose types are + * convertible to this \p pair's \c first_type and \c second_type, + * respectively. + * + * \param p The \p pair to copy from. + * + * \tparam U1 is convertible to \c first_type. + * \tparam U2 is convertible to \c second_type. + */ + template + inline __host__ __device__ + pair(const pair &p); + + /*! This copy constructor copies from a std::pair whose types are + * convertible to this \p pair's \c first_type and \c second_type, + * respectively. + * + * \param p The std::pair to copy from. + * + * \tparam U1 is convertible to \c first_type. + * \tparam U2 is convertible to \c second_type. + */ + template + inline __host__ __device__ + pair(const std::pair &p); + + /*! \p swap swaps the elements of two pairs. + * + * \param p The other pair with which to swap. + */ + inline __host__ __device__ + void swap(pair &p); +}; // end pair + + +/*! This operator tests two \p pairs for equality. + * + * \param x The first \p pair to compare. + * \param y The second \p pair to compare. + * \return \c true if and only if x.first == y.first && x.second == y.second. + * + * \tparam T1 is a model of Equality Comparable. + * \tparam T2 is a model of Equality Comparable. + */ +template + inline __host__ __device__ + bool operator==(const pair &x, const pair &y); + + +/*! This operator tests two pairs for ascending ordering. + * + * \param x The first \p pair to compare. + * \param y The second \p pair to compare. + * \return \c true if and only if x.first < y.first || (!(y.first < x.first) && x.second < y.second). + * + * \tparam T1 is a model of LessThan Comparable. + * \tparam T2 is a model of LessThan Comparable. + */ +template + inline __host__ __device__ + bool operator<(const pair &x, const pair &y); + + +/*! This operator tests two pairs for inequality. + * + * \param x The first \p pair to compare. + * \param y The second \p pair to compare. + * \return \c true if and only if !(x == y). 
+ * + * \tparam T1 is a model of Equality Comparable. + * \tparam T2 is a model of Equality Comparable. + */ +template + inline __host__ __device__ + bool operator!=(const pair &x, const pair &y); + + +/*! This operator tests two pairs for descending ordering. + * + * \param x The first \p pair to compare. + * \param y The second \p pair to compare. + * \return \c true if and only if y < x. + * + * \tparam T1 is a model of LessThan Comparable. + * \tparam T2 is a model of LessThan Comparable. + */ +template + inline __host__ __device__ + bool operator>(const pair &x, const pair &y); + + +/*! This operator tests two pairs for ascending ordering or equivalence. + * + * \param x The first \p pair to compare. + * \param y The second \p pair to compare. + * \return \c true if and only if !(y < x). + * + * \tparam T1 is a model of LessThan Comparable. + * \tparam T2 is a model of LessThan Comparable. + */ +template + inline __host__ __device__ + bool operator<=(const pair &x, const pair &y); + + +/*! This operator tests two pairs for descending ordering or equivalence. + * + * \param x The first \p pair to compare. + * \param y The second \p pair to compare. + * \return \c true if and only if !(x < y). + * + * \tparam T1 is a model of LessThan Comparable. + * \tparam T2 is a model of LessThan Comparable. + */ +template + inline __host__ __device__ + bool operator>=(const pair &x, const pair &y); + + +/*! \p swap swaps the contents of two pairs. + * + * \param x The first \p pair to swap. + * \param y The second \p pair to swap. + */ +template + inline __host__ __device__ + void swap(pair &x, pair &y); + + +/*! This convenience function creates a \p pair from two objects. + * + * \param x The first object to copy from. + * \param y The second object to copy from. + * \return A newly-constructed \p pair copied from \p a and \p b. + * + * \tparam T1 There are no requirements on the type of \p T1. + * \tparam T2 There are no requirements on the type of \p T2. 
+ */ +template + inline __host__ __device__ + pair make_pair(T1 x, T2 y); + + +/*! This convenience metafunction is included for compatibility with + * \p tuple. It returns either the type of a \p pair's + * \c first_type or \c second_type in its nested type, \c type. + * + * \tparam N This parameter selects the member of interest. + * \tparam T A \c pair type of interest. + */ +template struct tuple_element; + + +/*! This convenience metafunction is included for compatibility with + * \p tuple. It returns \c 2, the number of elements of a \p pair, + * in its nested data member, \c value. + * + * \tparam Pair A \c pair type of interest. + */ +template struct tuple_size; + + +/*! This convenience function returns a reference to either the first or + * second member of a \p pair. + * + * \param p The \p pair of interest. + * \return \c p.first or \c p.second, depending on the template + * parameter. + * + * \tparam N This parameter selects the member of interest. + */ +// XXX comment out these prototypes as a WAR to a problem on MSVC 2005 +//template +// inline __host__ __device__ +// typename tuple_element >::type & +// get(pair &p); + + +/*! This convenience function returns a const reference to either the + * first or second member of a \p pair. + * + * \param p The \p pair of interest. + * \return \c p.first or \c p.second, depending on the template + * parameter. + * + * \tparam i This parameter selects the member of interest. + */ +// XXX comment out these prototypes as a WAR to a problem on MSVC 2005 +//template +// inline __host__ __device__ +// const typename tuple_element >::type & +// get(const pair &p); + +/*! \} // pair + */ + +/*! 
\} // utility + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/partition.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/partition.h new file mode 100644 index 0000000000000000000000000000000000000000..90768f2467668726fc5fd86d974d19bb07bfa125 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/partition.h @@ -0,0 +1,1436 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file partition.h + * \brief Reorganizes a range based on a predicate + */ + +#pragma once + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup reordering + * \ingroup algorithms + * + * \addtogroup partitioning + * \ingroup reordering + * \{ + */ + + +/*! \p partition reorders the elements [first, last) based on the function + * object \p pred, such that all of the elements that satisfy \p pred precede the + * elements that fail to satisfy it. The postcondition is that, for some iterator + * \c middle in the range [first, last), pred(*i) is \c true for every + * iterator \c i in the range [first,middle) and \c false for every iterator + * \c i in the range [middle, last). The return value of \p partition is + * \c middle. 
+ * + * Note that the relative order of elements in the two reordered sequences is not + * necessarily the same as it was in the original sequence. A different algorithm, + * \p stable_partition, does guarantee to preserve the relative order. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence to reorder. + * \param last The end of the sequence to reorder. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return An iterator referring to the first element of the second partition, that is, + * the sequence of the elements which do not satisfy \p pred. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type, + * and \p ForwardIterator is mutable. + * \tparam Predicate is a model of Predicate. + * + * The following code snippet demonstrates how to use \p partition to reorder a + * sequence so that even numbers precede odd numbers using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... 
+ * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * const int N = sizeof(A)/sizeof(int); + * thrust::partition(thrust::host, + * A, A + N, + * is_even()); + * // A is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partition + * \see \p stable_partition + * \see \p partition_copy + */ +template +__host__ __device__ + ForwardIterator partition(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + Predicate pred); + + +/*! \p partition reorders the elements [first, last) based on the function + * object \p pred, such that all of the elements that satisfy \p pred precede the + * elements that fail to satisfy it. The postcondition is that, for some iterator + * \c middle in the range [first, last), pred(*i) is \c true for every + * iterator \c i in the range [first,middle) and \c false for every iterator + * \c i in the range [middle, last). The return value of \p partition is + * \c middle. + * + * Note that the relative order of elements in the two reordered sequences is not + * necessarily the same as it was in the original sequence. A different algorithm, + * \p stable_partition, does guarantee to preserve the relative order. + * + * \param first The beginning of the sequence to reorder. + * \param last The end of the sequence to reorder. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return An iterator referring to the first element of the second partition, that is, + * the sequence of the elements which do not satisfy \p pred. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type, + * and \p ForwardIterator is mutable. + * \tparam Predicate is a model of Predicate. 
+ * + * The following code snippet demonstrates how to use \p partition to reorder a + * sequence so that even numbers precede odd numbers. + * + * \code + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * const int N = sizeof(A)/sizeof(int); + * thrust::partition(A, A + N, + * is_even()); + * // A is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partition + * \see \p stable_partition + * \see \p partition_copy + */ +template + ForwardIterator partition(ForwardIterator first, + ForwardIterator last, + Predicate pred); + + +/*! \p partition reorders the elements [first, last) based on the function + * object \p pred applied to a stencil range [stencil, stencil + (last - first)), + * such that all of the elements whose corresponding stencil element satisfies \p pred precede all of the elements whose + * corresponding stencil element fails to satisfy it. The postcondition is that, for some iterator + * \c middle in the range [first, last), pred(*stencil_i) is \c true for every iterator + * \c stencil_i in the range [stencil,stencil + (middle - first)) and \c false for every iterator \c stencil_i + * in the range [stencil + (middle - first), stencil + (last - first)). + * The return value of \p stable_partition is \c middle. + * + * Note that the relative order of elements in the two reordered sequences is not + * necessarily the same as it was in the original sequence. A different algorithm, + * \p stable_partition, does guarantee to preserve the relative order. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence to reorder. + * \param last The end of the sequence to reorder. 
+ * \param stencil The beginning of the stencil sequence. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return An iterator referring to the first element of the second partition, that is, + * the sequence of the elements whose stencil elements do not satisfy \p pred. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * + * \pre The ranges [first,last) and [stencil, stencil + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p partition to reorder a + * sequence so that even numbers precede odd numbers using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * int A[] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; + * int S[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * const int N = sizeof(A)/sizeof(int); + * thrust::partition(thrust::host, A, A + N, S, is_even()); + * // A is now {1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + * // S is unmodified + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partition + * \see \p stable_partition + * \see \p partition_copy + */ +template +__host__ __device__ + ForwardIterator partition(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + InputIterator stencil, + Predicate pred); + + +/*! 
\p partition reorders the elements [first, last) based on the function + * object \p pred applied to a stencil range [stencil, stencil + (last - first)), + * such that all of the elements whose corresponding stencil element satisfies \p pred precede all of the elements whose + * corresponding stencil element fails to satisfy it. The postcondition is that, for some iterator + * \c middle in the range [first, last), pred(*stencil_i) is \c true for every iterator + * \c stencil_i in the range [stencil,stencil + (middle - first)) and \c false for every iterator \c stencil_i + * in the range [stencil + (middle - first), stencil + (last - first)). + * The return value of \p stable_partition is \c middle. + * + * Note that the relative order of elements in the two reordered sequences is not + * necessarily the same as it was in the original sequence. A different algorithm, + * \p stable_partition, does guarantee to preserve the relative order. + * + * \param first The beginning of the sequence to reorder. + * \param last The end of the sequence to reorder. + * \param stencil The beginning of the stencil sequence. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return An iterator referring to the first element of the second partition, that is, + * the sequence of the elements whose stencil elements do not satisfy \p pred. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * + * \pre The ranges [first,last) and [stencil, stencil + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p partition to reorder a + * sequence so that even numbers precede odd numbers. + * + * \code + * #include + * ... 
+ * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * int A[] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; + * int S[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * const int N = sizeof(A)/sizeof(int); + * thrust::partition(A, A + N, S, is_even()); + * // A is now {1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + * // S is unmodified + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partition + * \see \p stable_partition + * \see \p partition_copy + */ +template + ForwardIterator partition(ForwardIterator first, + ForwardIterator last, + InputIterator stencil, + Predicate pred); + + +/*! \p partition_copy differs from \p partition only in that the reordered + * sequence is written to different output sequences, rather than in place. + * + * \p partition_copy copies the elements [first, last) based on the + * function object \p pred. All of the elements that satisfy \p pred are copied + * to the range beginning at \p out_true and all the elements that fail to satisfy it + * are copied to the range beginning at \p out_false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence to reorder. + * \param last The end of the sequence to reorder. + * \param out_true The destination of the resulting sequence of elements which satisfy \p pred. + * \param out_false The destination of the resulting sequence of elements which fail to satisfy \p pred. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return A \p pair p such that p.first is the end of the output range beginning + * at \p out_true and p.second is the end of the output range beginning at + * \p out_false. + * + * \tparam DerivedPolicy The name of the derived execution policy. 
+ * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type and \p InputIterator's \c value_type + * is convertible to \p OutputIterator1 and \p OutputIterator2's \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The input range shall not overlap with either output range. + * + * The following code snippet demonstrates how to use \p partition_copy to separate a + * sequence into two output sequences of even and odd numbers using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * int result[10]; + * const int N = sizeof(A)/sizeof(int); + * int *evens = result; + * int *odds = result + 5; + * thrust::partition_copy(thrust::host, A, A + N, evens, odds, is_even()); + * // A remains {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + * // result is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * // evens points to {2, 4, 6, 8, 10} + * // odds points to {1, 3, 5, 7, 9} + * \endcode + * + * \note The relative order of elements in the two reordered sequences is not + * necessarily the same as it was in the original sequence. A different algorithm, + * \p stable_partition_copy, does guarantee to preserve the relative order. + * + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2569.pdf + * \see \p stable_partition_copy + * \see \p partition + */ +template +__host__ __device__ + thrust::pair + partition_copy(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator1 out_true, + OutputIterator2 out_false, + Predicate pred); + + +/*! 
\p partition_copy differs from \p partition only in that the reordered + * sequence is written to different output sequences, rather than in place. + * + * \p partition_copy copies the elements [first, last) based on the + * function object \p pred. All of the elements that satisfy \p pred are copied + * to the range beginning at \p out_true and all the elements that fail to satisfy it + * are copied to the range beginning at \p out_false. + * + * \param first The beginning of the sequence to reorder. + * \param last The end of the sequence to reorder. + * \param out_true The destination of the resulting sequence of elements which satisfy \p pred. + * \param out_false The destination of the resulting sequence of elements which fail to satisfy \p pred. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return A \p pair p such that p.first is the end of the output range beginning + * at \p out_true and p.second is the end of the output range beginning at + * \p out_false. + * + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type and \p InputIterator's \c value_type + * is convertible to \p OutputIterator1 and \p OutputIterator2's \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The input range shall not overlap with either output range. + * + * The following code snippet demonstrates how to use \p partition_copy to separate a + * sequence into two output sequences of even and odd numbers. + * + * \code + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... 
+ * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * int result[10]; + * const int N = sizeof(A)/sizeof(int); + * int *evens = result; + * int *odds = result + 5; + * thrust::partition_copy(A, A + N, evens, odds, is_even()); + * // A remains {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + * // result is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * // evens points to {2, 4, 6, 8, 10} + * // odds points to {1, 3, 5, 7, 9} + * \endcode + * + * \note The relative order of elements in the two reordered sequences is not + * necessarily the same as it was in the original sequence. A different algorithm, + * \p stable_partition_copy, does guarantee to preserve the relative order. + * + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2569.pdf + * \see \p stable_partition_copy + * \see \p partition + */ +template + thrust::pair + partition_copy(InputIterator first, + InputIterator last, + OutputIterator1 out_true, + OutputIterator2 out_false, + Predicate pred); + + +/*! \p partition_copy differs from \p partition only in that the reordered + * sequence is written to different output sequences, rather than in place. + * + * \p partition_copy copies the elements [first, last) based on the + * function object \p pred which is applied to a range of stencil elements. All of the elements + * whose corresponding stencil element satisfies \p pred are copied to the range beginning at \p out_true + * and all the elements whose stencil element fails to satisfy it are copied to the range beginning + * at \p out_false. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence to reorder. + * \param last The end of the sequence to reorder. + * \param stencil The beginning of the stencil sequence. + * \param out_true The destination of the resulting sequence of elements which satisfy \p pred. 
+ * \param out_false The destination of the resulting sequence of elements which fail to satisfy \p pred. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return A \p pair p such that p.first is the end of the output range beginning + * at \p out_true and p.second is the end of the output range beginning at + * \p out_false. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p OutputIterator1 and \p OutputIterator2's \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The input ranges shall not overlap with either output range. + * + * The following code snippet demonstrates how to use \p partition_copy to separate a + * sequence into two output sequences of even and odd numbers using the \p thrust::host execution + * policy for parallelization. + * + * \code + * #include + * #include + * #include + * ... 
+ * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * int S[] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; + * int result[10]; + * const int N = sizeof(A)/sizeof(int); + * int *evens = result; + * int *odds = result + 5; + * thrust::stable_partition_copy(thrust::host, A, A + N, S, evens, odds, thrust::identity()); + * // A remains {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + * // S remains {0, 1, 0, 1, 0, 1, 0, 1, 0, 1} + * // result is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * // evens points to {2, 4, 6, 8, 10} + * // odds points to {1, 3, 5, 7, 9} + * \endcode + * + * \note The relative order of elements in the two reordered sequences is not + * necessarily the same as it was in the original sequence. A different algorithm, + * \p stable_partition_copy, does guarantee to preserve the relative order. + * + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2569.pdf + * \see \p stable_partition_copy + * \see \p partition + */ +template +__host__ __device__ + thrust::pair + partition_copy(const thrust::detail::execution_policy_base &exec, + InputIterator1 first, + InputIterator1 last, + InputIterator2 stencil, + OutputIterator1 out_true, + OutputIterator2 out_false, + Predicate pred); + + +/*! \p partition_copy differs from \p partition only in that the reordered + * sequence is written to different output sequences, rather than in place. + * + * \p partition_copy copies the elements [first, last) based on the + * function object \p pred which is applied to a range of stencil elements. All of the elements + * whose corresponding stencil element satisfies \p pred are copied to the range beginning at \p out_true + * and all the elements whose stencil element fails to satisfy it are copied to the range beginning + * at \p out_false. + * + * \param first The beginning of the sequence to reorder. + * \param last The end of the sequence to reorder. + * \param stencil The beginning of the stencil sequence.
+ * \param out_true The destination of the resulting sequence of elements which satisfy \p pred. + * \param out_false The destination of the resulting sequence of elements which fail to satisfy \p pred. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return A \p pair p such that p.first is the end of the output range beginning + * at \p out_true and p.second is the end of the output range beginning at + * \p out_false. + * + * \tparam InputIterator1 is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p OutputIterator1 and \p OutputIterator2's \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The input ranges shall not overlap with either output range. + * + * The following code snippet demonstrates how to use \p partition_copy to separate a + * sequence into two output sequences of even and odd numbers. + * + * \code + * #include + * #include + * ... + * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * int S[] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; + * int result[10]; + * const int N = sizeof(A)/sizeof(int); + * int *evens = result; + * int *odds = result + 5; + * thrust::stable_partition_copy(A, A + N, S, evens, odds, thrust::identity()); + * // A remains {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + * // S remains {0, 1, 0, 1, 0, 1, 0, 1, 0, 1} + * // result is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * // evens points to {2, 4, 6, 8, 10} + * // odds points to {1, 3, 5, 7, 9} + * \endcode + * + * \note The relative order of elements in the two reordered sequences is not + * necessarily the same as it was in the original sequence. 
A different algorithm, + * \p stable_partition_copy, does guarantee to preserve the relative order. + * + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2569.pdf + * \see \p stable_partition_copy + * \see \p partition + */ +template + thrust::pair + partition_copy(InputIterator1 first, + InputIterator1 last, + InputIterator2 stencil, + OutputIterator1 out_true, + OutputIterator2 out_false, + Predicate pred); + + +/*! \p stable_partition is much like \p partition : it reorders the elements in the + * range [first, last) based on the function object \p pred, such that all of + * the elements that satisfy \p pred precede all of the elements that fail to satisfy + * it. The postcondition is that, for some iterator \p middle in the range + * [first, last), pred(*i) is \c true for every iterator \c i in the + * range [first,middle) and \c false for every iterator \c i in the range + * [middle, last). The return value of \p stable_partition is \c middle. + * + * \p stable_partition differs from \p partition in that \p stable_partition is + * guaranteed to preserve relative order. That is, if \c x and \c y are elements in + * [first, last), and \c stencil_x and \c stencil_y are the stencil elements + * in corresponding positions within [stencil, stencil + (last - first)), + * and pred(stencil_x) == pred(stencil_y), and if \c x precedes + * \c y, then it will still be true after \p stable_partition that \c x precedes \c y. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The first element of the sequence to reorder. + * \param last One position past the last element of the sequence to reorder. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. 
+ * \return An iterator referring to the first element of the second partition, that is, + * the sequence of the elements which do not satisfy pred. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type, + * and \p ForwardIterator is mutable. + * \tparam Predicate is a model of Predicate. + * + * The following code snippet demonstrates how to use \p stable_partition to reorder a + * sequence so that even numbers precede odd numbers using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * const int N = sizeof(A)/sizeof(int); + * thrust::stable_partition(thrust::host, + * A, A + N, + * is_even()); + * // A is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/stable_partition + * \see \p partition + * \see \p stable_partition_copy + */ +template +__host__ __device__ + ForwardIterator stable_partition(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + Predicate pred); + + +/*! \p stable_partition is much like \p partition : it reorders the elements in the + * range [first, last) based on the function object \p pred, such that all of + * the elements that satisfy \p pred precede all of the elements that fail to satisfy + * it. The postcondition is that, for some iterator \p middle in the range + * [first, last), pred(*i) is \c true for every iterator \c i in the + * range [first,middle) and \c false for every iterator \c i in the range + * [middle, last). The return value of \p stable_partition is \c middle. 
+ * + * \p stable_partition differs from \p partition in that \p stable_partition is + * guaranteed to preserve relative order. That is, if \c x and \c y are elements in + * [first, last), and \c stencil_x and \c stencil_y are the stencil elements + * in corresponding positions within [stencil, stencil + (last - first)), + * and pred(stencil_x) == pred(stencil_y), and if \c x precedes + * \c y, then it will still be true after \p stable_partition that \c x precedes \c y. + * + * \param first The first element of the sequence to reorder. + * \param last One position past the last element of the sequence to reorder. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return An iterator referring to the first element of the second partition, that is, + * the sequence of the elements which do not satisfy pred. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type, + * and \p ForwardIterator is mutable. + * \tparam Predicate is a model of Predicate. + * + * The following code snippet demonstrates how to use \p stable_partition to reorder a + * sequence so that even numbers precede odd numbers. + * + * \code + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * const int N = sizeof(A)/sizeof(int); + * thrust::stable_partition(A, A + N, + * is_even()); + * // A is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/stable_partition + * \see \p partition + * \see \p stable_partition_copy + */ +template + ForwardIterator stable_partition(ForwardIterator first, + ForwardIterator last, + Predicate pred); + + +/*! 
\p stable_partition is much like \p partition: it reorders the elements in the + * range [first, last) based on the function object \p pred applied to a stencil + * range [stencil, stencil + (last - first)), such that all of + * the elements whose corresponding stencil element satisfies \p pred precede all of the elements whose + * corresponding stencil element fails to satisfy it. The postcondition is that, for some iterator + * \c middle in the range [first, last), pred(*stencil_i) is \c true for every iterator + * \c stencil_i in the range [stencil,stencil + (middle - first)) and \c false for every iterator \c stencil_i + * in the range [stencil + (middle - first), stencil + (last - first)). + * The return value of \p stable_partition is \c middle. + * + * \p stable_partition differs from \p partition in that \p stable_partition is + * guaranteed to preserve relative order. That is, if \c x and \c y are elements in + * [first, last), such that pred(x) == pred(y), and if \c x precedes + * \c y, then it will still be true after \p stable_partition that \c x precedes \c y. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The first element of the sequence to reorder. + * \param last One position past the last element of the sequence to reorder. + * \param stencil The beginning of the stencil sequence. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return An iterator referring to the first element of the second partition, that is, + * the sequence of the elements whose stencil elements do not satisfy \p pred. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable. 
+ * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * + * \pre The range [first, last) shall not overlap with the range [stencil, stencil + (last - first)). + * + * The following code snippet demonstrates how to use \p stable_partition to reorder a + * sequence so that even numbers precede odd numbers using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * int A[] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; + * int S[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * const int N = sizeof(A)/sizeof(int); + * thrust::stable_partition(thrust::host, A, A + N, S, is_even()); + * // A is now {1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + * // S is unmodified + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/stable_partition + * \see \p partition + * \see \p stable_partition_copy + */ +template +__host__ __device__ + ForwardIterator stable_partition(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + InputIterator stencil, + Predicate pred); + + +/*! \p stable_partition is much like \p partition: it reorders the elements in the + * range [first, last) based on the function object \p pred applied to a stencil + * range [stencil, stencil + (last - first)), such that all of + * the elements whose corresponding stencil element satisfies \p pred precede all of the elements whose + * corresponding stencil element fails to satisfy it. 
The postcondition is that, for some iterator + * \c middle in the range [first, last), pred(*stencil_i) is \c true for every iterator + * \c stencil_i in the range [stencil,stencil + (middle - first)) and \c false for every iterator \c stencil_i + * in the range [stencil + (middle - first), stencil + (last - first)). + * The return value of \p stable_partition is \c middle. + * + * \p stable_partition differs from \p partition in that \p stable_partition is + * guaranteed to preserve relative order. That is, if \c x and \c y are elements in + * [first, last), such that pred(x) == pred(y), and if \c x precedes + * \c y, then it will still be true after \p stable_partition that \c x precedes \c y. + * + * \param first The first element of the sequence to reorder. + * \param last One position past the last element of the sequence to reorder. + * \param stencil The beginning of the stencil sequence. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return An iterator referring to the first element of the second partition, that is, + * the sequence of the elements whose stencil elements do not satisfy \p pred. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * + * \pre The range [first, last) shall not overlap with the range [stencil, stencil + (last - first)). + * + * The following code snippet demonstrates how to use \p stable_partition to reorder a + * sequence so that even numbers precede odd numbers. + * + * \code + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... 
+ * int A[] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; + * int S[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * const int N = sizeof(A)/sizeof(int); + * thrust::stable_partition(A, A + N, S, is_even()); + * // A is now {1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + * // S is unmodified + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/stable_partition + * \see \p partition + * \see \p stable_partition_copy + */ +template + ForwardIterator stable_partition(ForwardIterator first, + ForwardIterator last, + InputIterator stencil, + Predicate pred); + + +/*! \p stable_partition_copy differs from \p stable_partition only in that the reordered + * sequence is written to different output sequences, rather than in place. + * + * \p stable_partition_copy copies the elements [first, last) based on the + * function object \p pred. All of the elements that satisfy \p pred are copied + * to the range beginning at \p out_true and all the elements that fail to satisfy it + * are copied to the range beginning at \p out_false. + * + * \p stable_partition_copy differs from \p partition_copy in that + * \p stable_partition_copy is guaranteed to preserve relative order. That is, if + * \c x and \c y are elements in [first, last), such that + * pred(x) == pred(y), and if \c x precedes \c y, then it will still be true + * after \p stable_partition_copy that \c x precedes \c y in the output. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The first element of the sequence to reorder. + * \param last One position past the last element of the sequence to reorder. + * \param out_true The destination of the resulting sequence of elements which satisfy \p pred. + * \param out_false The destination of the resulting sequence of elements which fail to satisfy \p pred. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. 
+ * \return A \p pair p such that p.first is the end of the output range beginning + * at \p out_true and p.second is the end of the output range beginning at + * \p out_false. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type and \p InputIterator's \c value_type + * is convertible to \p OutputIterator1 and \p OutputIterator2's \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The input ranges shall not overlap with either output range. + * + * The following code snippet demonstrates how to use \p stable_partition_copy to + * reorder a sequence so that even numbers precede odd numbers using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * int result[10]; + * const int N = sizeof(A)/sizeof(int); + * int *evens = result; + * int *odds = result + 5; + * thrust::stable_partition_copy(thrust::host, A, A + N, evens, odds, is_even()); + * // A remains {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + * // result is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * // evens points to {2, 4, 6, 8, 10} + * // odds points to {1, 3, 5, 7, 9} + * \endcode + * + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2569.pdf + * \see \p partition_copy + * \see \p stable_partition + */ +template +__host__ __device__ + thrust::pair + stable_partition_copy(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator1 out_true, + OutputIterator2 out_false, + Predicate pred); + + +/*! 
\p stable_partition_copy differs from \p stable_partition only in that the reordered + * sequence is written to different output sequences, rather than in place. + * + * \p stable_partition_copy copies the elements [first, last) based on the + * function object \p pred. All of the elements that satisfy \p pred are copied + * to the range beginning at \p out_true and all the elements that fail to satisfy it + * are copied to the range beginning at \p out_false. + * + * \p stable_partition_copy differs from \p partition_copy in that + * \p stable_partition_copy is guaranteed to preserve relative order. That is, if + * \c x and \c y are elements in [first, last), such that + * pred(x) == pred(y), and if \c x precedes \c y, then it will still be true + * after \p stable_partition_copy that \c x precedes \c y in the output. + * + * \param first The first element of the sequence to reorder. + * \param last One position past the last element of the sequence to reorder. + * \param out_true The destination of the resulting sequence of elements which satisfy \p pred. + * \param out_false The destination of the resulting sequence of elements which fail to satisfy \p pred. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return A \p pair p such that p.first is the end of the output range beginning + * at \p out_true and p.second is the end of the output range beginning at + * \p out_false. + * + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type and \p InputIterator's \c value_type + * is convertible to \p OutputIterator1 and \p OutputIterator2's \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The input ranges shall not overlap with either output range. 
+ * + * The following code snippet demonstrates how to use \p stable_partition_copy to + * reorder a sequence so that even numbers precede odd numbers. + * + * \code + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * int result[10]; + * const int N = sizeof(A)/sizeof(int); + * int *evens = result; + * int *odds = result + 5; + * thrust::stable_partition_copy(A, A + N, evens, odds, is_even()); + * // A remains {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + * // result is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * // evens points to {2, 4, 6, 8, 10} + * // odds points to {1, 3, 5, 7, 9} + * \endcode + * + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2569.pdf + * \see \p partition_copy + * \see \p stable_partition + */ +template + thrust::pair + stable_partition_copy(InputIterator first, + InputIterator last, + OutputIterator1 out_true, + OutputIterator2 out_false, + Predicate pred); + + +/*! \p stable_partition_copy differs from \p stable_partition only in that the reordered + * sequence is written to different output sequences, rather than in place. + * + * \p stable_partition_copy copies the elements [first, last) based on the + * function object \p pred which is applied to a range of stencil elements. All of the elements + * whose corresponding stencil element satisfies \p pred are copied to the range beginning at \p out_true + * and all the elements whose stencil element fails to satisfy it are copied to the range beginning + * at \p out_false. + * + * \p stable_partition_copy differs from \p partition_copy in that + * \p stable_partition_copy is guaranteed to preserve relative order. That is, if + * \c x and \c y are elements in [first, last), such that + * pred(x) == pred(y), and if \c x precedes \c y, then it will still be true + * after \p stable_partition_copy that \c x precedes \c y in the output. 
+ * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The first element of the sequence to reorder. + * \param last One position past the last element of the sequence to reorder. + * \param stencil The beginning of the stencil sequence. + * \param out_true The destination of the resulting sequence of elements which satisfy \p pred. + * \param out_false The destination of the resulting sequence of elements which fail to satisfy \p pred. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return A \p pair p such that p.first is the end of the output range beginning + * at \p out_true and p.second is the end of the output range beginning at + * \p out_false. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p OutputIterator1 and \p OutputIterator2's \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The input ranges shall not overlap with either output range. + * + * The following code snippet demonstrates how to use \p stable_partition_copy to + * reorder a sequence so that even numbers precede odd numbers using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * int S[] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; + * int result[10]; + * const int N = sizeof(A)/sizeof(int); + * int *evens = result; + * int *odds = result + 5; + * thrust::stable_partition_copy(thrust::host, A, A + N, S, evens, odds, thrust::identity()); + * // A remains {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + * // S remains {0, 1, 0, 1, 0, 1, 0, 1, 0, 1} + * // result is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * // evens points to {2, 4, 6, 8, 10} + * // odds points to {1, 3, 5, 7, 9} + * \endcode + * + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2569.pdf + * \see \p partition_copy + * \see \p stable_partition + */ +template +__host__ __device__ + thrust::pair + stable_partition_copy(const thrust::detail::execution_policy_base &exec, + InputIterator1 first, + InputIterator1 last, + InputIterator2 stencil, + OutputIterator1 out_true, + OutputIterator2 out_false, + Predicate pred); + + +/*! \p stable_partition_copy differs from \p stable_partition only in that the reordered + * sequence is written to different output sequences, rather than in place. + * + * \p stable_partition_copy copies the elements [first, last) based on the + * function object \p pred which is applied to a range of stencil elements. All of the elements + * whose corresponding stencil element satisfies \p pred are copied to the range beginning at \p out_true + * and all the elements whose stencil element fails to satisfy it are copied to the range beginning + * at \p out_false. + * + * \p stable_partition_copy differs from \p partition_copy in that + * \p stable_partition_copy is guaranteed to preserve relative order. That is, if + * \c x and \c y are elements in [first, last), such that + * pred(x) == pred(y), and if \c x precedes \c y, then it will still be true + * after \p stable_partition_copy that \c x precedes \c y in the output. + * + * \param first The first element of the sequence to reorder. 
+ * \param last One position past the last element of the sequence to reorder. + * \param stencil The beginning of the stencil sequence. + * \param out_true The destination of the resulting sequence of elements which satisfy \p pred. + * \param out_false The destination of the resulting sequence of elements which fail to satisfy \p pred. + * \param pred A function object which decides to which partition each element of the + * sequence [first, last) belongs. + * \return A \p pair p such that p.first is the end of the output range beginning + * at \p out_true and p.second is the end of the output range beginning at + * \p out_false. + * + * \tparam InputIterator1 is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p OutputIterator1 and \p OutputIterator2's \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The input ranges shall not overlap with either output range. + * + * The following code snippet demonstrates how to use \p stable_partition_copy to + * reorder a sequence so that even numbers precede odd numbers. + * + * \code + * #include + * #include + * ... 
+ * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * int S[] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1}; + * int result[10]; + * const int N = sizeof(A)/sizeof(int); + * int *evens = result; + * int *odds = result + 5; + * thrust::stable_partition_copy(A, A + N, S, evens, odds, thrust::identity()); + * // A remains {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + * // S remains {0, 1, 0, 1, 0, 1, 0, 1, 0, 1} + * // result is now {2, 4, 6, 8, 10, 1, 3, 5, 7, 9} + * // evens points to {2, 4, 6, 8, 10} + * // odds points to {1, 3, 5, 7, 9} + * \endcode + * + * \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2569.pdf + * \see \p partition_copy + * \see \p stable_partition + */ +template + thrust::pair + stable_partition_copy(InputIterator1 first, + InputIterator1 last, + InputIterator2 stencil, + OutputIterator1 out_true, + OutputIterator2 out_false, + Predicate pred); + + +/*! \} // end stream_compaction + */ + +/*! \} // end reordering + */ + +/*! \addtogroup searching + * \{ + */ + + +/*! \p partition_point returns an iterator pointing to the end of the true + * partition of a partitioned range. \p partition_point requires the input range + * [first,last) to be a partition; that is, all elements which satisfy + * pred shall appear before those that do not. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the range to consider. + * \param last The end of the range to consider. + * \param pred A function object which decides to which partition each element of the + * range [first, last) belongs. + * \return An iterator \c mid such that all_of(first, mid, pred) + * and none_of(mid, last, pred) are both true. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type. 
+ * \tparam Predicate is a model of Predicate. + * + * \pre The range [first, last) shall be partitioned by \p pred. + * + * \note Though similar, \p partition_point is not redundant with \p find_if_not. + * \p partition_point's precondition provides an opportunity for a + * faster implementation. + * + * \code + * #include + * #include + * + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * + * ... + * + * int A[] = {2, 4, 6, 8, 10, 1, 3, 5, 7, 9}; + * int * B = thrust::partition_point(thrust::host, A, A + 10, is_even()); + * // B - A is 5 + * // [A, B) contains only even values + * \endcode + * + * \see \p partition + * \see \p find_if_not + */ +template +__host__ __device__ + ForwardIterator partition_point(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + Predicate pred); + + +/*! \p partition_point returns an iterator pointing to the end of the true + * partition of a partitioned range. \p partition_point requires the input range + * [first,last) to be a partition; that is, all elements which satisfy + * pred shall appear before those that do not. + * \param first The beginning of the range to consider. + * \param last The end of the range to consider. + * \param pred A function object which decides to which partition each element of the + * range [first, last) belongs. + * \return An iterator \c mid such that all_of(first, mid, pred) + * and none_of(mid, last, pred) are both true. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * + * \pre The range [first, last) shall be partitioned by \p pred. + * + * \note Though similar, \p partition_point is not redundant with \p find_if_not. + * \p partition_point's precondition provides an opportunity for a + * faster implementation.
+ * + * \code + * #include + * + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * + * ... + * + * int A[] = {2, 4, 6, 8, 10, 1, 3, 5, 7, 9}; + * int * B = thrust::partition_point(A, A + 10, is_even()); + * // B - A is 5 + * // [A, B) contains only even values + * \endcode + * + * \see \p partition + * \see \p find_if_not + */ +template + ForwardIterator partition_point(ForwardIterator first, + ForwardIterator last, + Predicate pred); + +/*! \} // searching + */ + +/*! \addtogroup reductions + * \{ + * \addtogroup predicates + * \{ + */ + + +/*! \p is_partitioned returns \c true if the given range + * is partitioned with respect to a predicate, and \c false otherwise. + * + * Specifically, \p is_partitioned returns \c true if [first, last) + * is empty of if [first, last) is partitioned by \p pred, i.e. if + * all elements that satisfy \p pred appear before those that do not. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the range to consider. + * \param last The end of the range to consider. + * \param pred A function object which decides to which partition each element of the + * range [first, last) belongs. + * \return \c true if the range [first, last) is partitioned with respect + * to \p pred, or if [first, last) is empty. \c false, otherwise. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * + * \code + * #include + * #include + * + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * + * ... 
+ * + * int A[] = {2, 4, 6, 8, 10, 1, 3, 5, 7, 9}; + * int B[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * + * thrust::is_partitioned(thrust::host, A, A + 10, is_even()); // returns true + * thrust::is_partitioned(thrust::host, B, B + 10, is_even()); // returns false + * \endcode + * + * \see \p partition + */ +template +__host__ __device__ + bool is_partitioned(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + Predicate pred); + + +/*! \p is_partitioned returns \c true if the given range + * is partitioned with respect to a predicate, and \c false otherwise. + * + * Specifically, \p is_partitioned returns \c true if [first, last) + * is empty of if [first, last) is partitioned by \p pred, i.e. if + * all elements that satisfy \p pred appear before those that do not. + * + * \param first The beginning of the range to consider. + * \param last The end of the range to consider. + * \param pred A function object which decides to which partition each element of the + * range [first, last) belongs. + * \return \c true if the range [first, last) is partitioned with respect + * to \p pred, or if [first, last) is empty. \c false, otherwise. + * + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * + * \code + * #include + * + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int &x) + * { + * return (x % 2) == 0; + * } + * }; + * + * ... + * + * int A[] = {2, 4, 6, 8, 10, 1, 3, 5, 7, 9}; + * int B[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * + * thrust::is_partitioned(A, A + 10, is_even()); // returns true + * thrust::is_partitioned(B, B + 10, is_even()); // returns false + * \endcode + * + * \see \p partition + */ +template + bool is_partitioned(InputIterator first, + InputIterator last, + Predicate pred); + + +/*! 
\} // end predicates + * \} // end reductions + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/random.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/random.h new file mode 100644 index 0000000000000000000000000000000000000000..7463620b7bceb0f3a09a47c28dabffe765baeae4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/random.h @@ -0,0 +1,117 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file random.h + * \brief Pseudo-random number generators. + */ + +#pragma once + +#include +#include + +// RNGs +#include +#include +#include +#include +#include + +// distributions +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup random Random Number Generation + * \{ + */ + + +/*! \namespace thrust::random + * \brief \p thrust::random is the namespace which contains random number engine class templates, + * random number engine adaptor class templates, engines with predefined parameters, + * and random number distribution class templates. They are provided in a separate namespace + * for import convenience but are also aliased in the top-level \p thrust namespace for + * easy access. + */ +namespace random +{ + +/*! 
\addtogroup predefined_random Random Number Engines with Predefined Parameters + * \ingroup random + * \{ + */ + +/*! \typedef ranlux24 + * \brief A random number engine with predefined parameters which implements the + * RANLUX level-3 random number generation algorithm. + * \note The 10000th consecutive invocation of a default-constructed object of type \p ranlux24 + * shall produce the value \c 9901578 . + */ +typedef discard_block_engine ranlux24; + + +/*! \typedef ranlux48 + * \brief A random number engine with predefined parameters which implements the + * RANLUX level-4 random number generation algorithm. + * \note The 10000th consecutive invocation of a default-constructed object of type \p ranlux48 + * shall produce the value \c 88229545517833 . + */ +typedef discard_block_engine ranlux48; + + +/*! \typedef taus88 + * \brief A random number engine with predefined parameters which implements + * L'Ecuyer's 1996 three-component Tausworthe random number generator. + * + * \note The 10000th consecutive invocation of a default-constructed object of type \p taus88 + * shall produce the value \c 3535848941 . + */ +typedef xor_combine_engine< + linear_feedback_shift_engine, + 0, + xor_combine_engine< + linear_feedback_shift_engine, 0, + linear_feedback_shift_engine, 0 + >, + 0 +> taus88; + +/*! \typedef default_random_engine + * \brief An implementation-defined "default" random number engine. + * \note \p default_random_engine is currently an alias for \p minstd_rand, and may change + * in a future version. + */ +typedef minstd_rand default_random_engine; + +/*! \} // end predefined_random + */ + +} // end random + + +/*! 
\} // end random + */ + +// import names into thrust:: +using random::ranlux24; +using random::ranlux48; +using random::taus88; +using random::default_random_engine; + +THRUST_NAMESPACE_END diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/reduce.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..c7b378f7297e1278ecc36e761320a350a621b282 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/reduce.h @@ -0,0 +1,781 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file thrust/reduce.h + * \brief Functions for reducing a range to a single value + */ + +#pragma once + +#include +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup reductions + * \{ + */ + + +/*! \p reduce is a generalization of summation: it computes the sum (or some + * other binary operation) of all the elements in the range [first, + * last). This version of \p reduce uses \c 0 as the initial value of the + * reduction. \p reduce is similar to the C++ Standard Template Library's + * std::accumulate. 
The primary difference between the two functions + * is that std::accumulate guarantees the order of summation, while + * \p reduce requires associativity of the binary operation to parallelize + * the reduction. + * + * Note that \p reduce also assumes that the binary reduction operator (in this + * case operator+) is commutative. If the reduction operator is not commutative + * then \p thrust::reduce should not be used. Instead, one could use + * \p inclusive_scan (which does not require commutativity) and select the + * last element of the output array. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \return The result of the reduction. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator + * and if \c x and \c y are objects of \p InputIterator's \c value_type, + * then x + y is defined and is convertible to \p InputIterator's + * \c value_type. If \c T is \c InputIterator's \c value_type, then + * T(0) is defined. + * + * The following code snippet demonstrates how to use \p reduce to compute + * the sum of a sequence of integers using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * int data[6] = {1, 0, 2, 2, 1, 3}; + * int result = thrust::reduce(thrust::host, data, data + 6); + * + * // result == 9 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/accumulate + */ +template +__host__ __device__ + typename thrust::iterator_traits::value_type + reduce(const thrust::detail::execution_policy_base &exec, InputIterator first, InputIterator last); + + +/*! \p reduce is a generalization of summation: it computes the sum (or some + * other binary operation) of all the elements in the range [first, + * last). 
This version of \p reduce uses \c 0 as the initial value of the + * reduction. \p reduce is similar to the C++ Standard Template Library's + * std::accumulate. The primary difference between the two functions + * is that std::accumulate guarantees the order of summation, while + * \p reduce requires associativity of the binary operation to parallelize + * the reduction. + * + * Note that \p reduce also assumes that the binary reduction operator (in this + * case operator+) is commutative. If the reduction operator is not commutative + * then \p thrust::reduce should not be used. Instead, one could use + * \p inclusive_scan (which does not require commutativity) and select the + * last element of the output array. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \return The result of the reduction. + * + * \tparam InputIterator is a model of Input Iterator + * and if \c x and \c y are objects of \p InputIterator's \c value_type, + * then x + y is defined and is convertible to \p InputIterator's + * \c value_type. If \c T is \c InputIterator's \c value_type, then + * T(0) is defined. + * + * The following code snippet demonstrates how to use \p reduce to compute + * the sum of a sequence of integers. + * + * \code + * #include + * ... + * int data[6] = {1, 0, 2, 2, 1, 3}; + * int result = thrust::reduce(data, data + 6); + * + * // result == 9 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/accumulate + */ +template typename + thrust::iterator_traits::value_type reduce(InputIterator first, InputIterator last); + + +/*! \p reduce is a generalization of summation: it computes the sum (or some + * other binary operation) of all the elements in the range [first, + * last). This version of \p reduce uses \p init as the initial value of the + * reduction. \p reduce is similar to the C++ Standard Template Library's + * std::accumulate. 
The primary difference between the two functions + * is that std::accumulate guarantees the order of summation, while + * \p reduce requires associativity of the binary operation to parallelize + * the reduction. + * + * Note that \p reduce also assumes that the binary reduction operator (in this + * case operator+) is commutative. If the reduction operator is not commutative + * then \p thrust::reduce should not be used. Instead, one could use + * \p inclusive_scan (which does not require commutativity) and select the + * last element of the output array. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param init The initial value. + * \return The result of the reduction. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator + * and if \c x and \c y are objects of \p InputIterator's \c value_type, + * then x + y is defined and is convertible to \p T. + * \tparam T is convertible to \p InputIterator's \c value_type. + * + * The following code snippet demonstrates how to use \p reduce to compute + * the sum of a sequence of integers including an intialization value using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * int data[6] = {1, 0, 2, 2, 1, 3}; + * int result = thrust::reduce(thrust::host, data, data + 6, 1); + * + * // result == 10 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/accumulate + */ +template +__host__ __device__ + T reduce(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + T init); + + +/*! \p reduce is a generalization of summation: it computes the sum (or some + * other binary operation) of all the elements in the range [first, + * last). 
This version of \p reduce uses \p init as the initial value of the + * reduction. \p reduce is similar to the C++ Standard Template Library's + * std::accumulate. The primary difference between the two functions + * is that std::accumulate guarantees the order of summation, while + * \p reduce requires associativity of the binary operation to parallelize + * the reduction. + * + * Note that \p reduce also assumes that the binary reduction operator (in this + * case operator+) is commutative. If the reduction operator is not commutative + * then \p thrust::reduce should not be used. Instead, one could use + * \p inclusive_scan (which does not require commutativity) and select the + * last element of the output array. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param init The initial value. + * \return The result of the reduction. + * + * \tparam InputIterator is a model of Input Iterator + * and if \c x and \c y are objects of \p InputIterator's \c value_type, + * then x + y is defined and is convertible to \p T. + * \tparam T is convertible to \p InputIterator's \c value_type. + * + * The following code snippet demonstrates how to use \p reduce to compute + * the sum of a sequence of integers including an intialization value. + * + * \code + * #include + * ... + * int data[6] = {1, 0, 2, 2, 1, 3}; + * int result = thrust::reduce(data, data + 6, 1); + * + * // result == 10 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/accumulate + */ +template + T reduce(InputIterator first, + InputIterator last, + T init); + + +/*! \p reduce is a generalization of summation: it computes the sum (or some + * other binary operation) of all the elements in the range [first, + * last). This version of \p reduce uses \p init as the initial value of the + * reduction and \p binary_op as the binary function used for summation. 
\p reduce + * is similar to the C++ Standard Template Library's std::accumulate. + * The primary difference between the two functions is that std::accumulate + * guarantees the order of summation, while \p reduce requires associativity of + * \p binary_op to parallelize the reduction. + * + * Note that \p reduce also assumes that the binary reduction operator (in this + * case \p binary_op) is commutative. If the reduction operator is not commutative + * then \p thrust::reduce should not be used. Instead, one could use + * \p inclusive_scan (which does not require commutativity) and select the + * last element of the output array. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param init The initial value. + * \param binary_op The binary function used to 'sum' values. + * \return The result of the reduction. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to \c T. + * \tparam T is a model of Assignable, + * and is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type. + * \tparam BinaryFunction is a model of Binary Function, + * and \p BinaryFunction's \c result_type is convertible to \p OutputType. + * + * The following code snippet demonstrates how to use \p reduce to + * compute the maximum value of a sequence of integers using the \p thrust::host execution policy + * for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * int data[6] = {1, 0, 2, 2, 1, 3}; + * int result = thrust::reduce(thrust::host, + * data, data + 6, + * -1, + * thrust::maximum()); + * // result == 3 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/accumulate + * \see transform_reduce + */ +template +__host__ __device__ + T reduce(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + T init, + BinaryFunction binary_op); + + +/*! \p reduce is a generalization of summation: it computes the sum (or some + * other binary operation) of all the elements in the range [first, + * last). This version of \p reduce uses \p init as the initial value of the + * reduction and \p binary_op as the binary function used for summation. \p reduce + * is similar to the C++ Standard Template Library's std::accumulate. + * The primary difference between the two functions is that std::accumulate + * guarantees the order of summation, while \p reduce requires associativity of + * \p binary_op to parallelize the reduction. + * + * Note that \p reduce also assumes that the binary reduction operator (in this + * case \p binary_op) is commutative. If the reduction operator is not commutative + * then \p thrust::reduce should not be used. Instead, one could use + * \p inclusive_scan (which does not require commutativity) and select the + * last element of the output array. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param init The initial value. + * \param binary_op The binary function used to 'sum' values. + * \return The result of the reduction. + * + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to \c T. + * \tparam T is a model of Assignable, + * and is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type. 
+ * \tparam BinaryFunction is a model of Binary Function, + * and \p BinaryFunction's \c result_type is convertible to \p OutputType. + * + * The following code snippet demonstrates how to use \p reduce to + * compute the maximum value of a sequence of integers. + * + * \code + * #include + * #include + * ... + * int data[6] = {1, 0, 2, 2, 1, 3}; + * int result = thrust::reduce(data, data + 6, + * -1, + * thrust::maximum()); + * // result == 3 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/accumulate + * \see transform_reduce + */ +template + T reduce(InputIterator first, + InputIterator last, + T init, + BinaryFunction binary_op); + + +/*! \p reduce_by_key is a generalization of \p reduce to key-value pairs. + * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p reduce_by_key copies the first element of the group to the + * \c keys_output. The corresponding values in the range are reduced using the + * \c plus and the result copied to \c values_output. + * + * This version of \p reduce_by_key uses the function object \c equal_to + * to test for equality and \c plus to reduce values with equal keys. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first The beginning of the input key range. + * \param keys_last The end of the input key range. + * \param values_first The beginning of the input value range. + * \param keys_output The beginning of the output key range. + * \param values_output The beginning of the output value range. + * \return A pair of iterators at end of the ranges [keys_output, keys_output_last) and [values_output, values_output_last). + * + * \tparam DerivedPolicy The name of the derived execution policy. 
+ * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputIterator1 is a model of Output Iterator and + * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. + * \tparam OutputIterator2 is a model of Output Iterator and + * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. + * + * \pre The input ranges shall not overlap either output range. + * + * The following code snippet demonstrates how to use \p reduce_by_key to + * compact a sequence of key/value pairs and sum values with equal keys using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values + * int C[N]; // output keys + * int D[N]; // output values + * + * thrust::pair new_end; + * new_end = thrust::reduce_by_key(thrust::host, A, A + N, B, C, D); + * + * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. + * // The first four values in D are now {9, 21, 9, 3} and new_end.second - D is 4. + * \endcode + * + * \see reduce + * \see unique_copy + * \see unique_by_key + * \see unique_by_key_copy + */ +template +__host__ __device__ + thrust::pair + reduce_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 keys_first, + InputIterator1 keys_last, + InputIterator2 values_first, + OutputIterator1 keys_output, + OutputIterator2 values_output); + + +/*! \p reduce_by_key is a generalization of \p reduce to key-value pairs. + * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p reduce_by_key copies the first element of the group to the + * \c keys_output. The corresponding values in the range are reduced using the + * \c plus and the result copied to \c values_output. 
+ * + * This version of \p reduce_by_key uses the function object \c equal_to + * to test for equality and \c plus to reduce values with equal keys. + * + * \param keys_first The beginning of the input key range. + * \param keys_last The end of the input key range. + * \param values_first The beginning of the input value range. + * \param keys_output The beginning of the output key range. + * \param values_output The beginning of the output value range. + * \return A pair of iterators at end of the ranges [keys_output, keys_output_last) and [values_output, values_output_last). + * + * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputIterator1 is a model of Output Iterator and + * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. + * \tparam OutputIterator2 is a model of Output Iterator and + * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. + * + * \pre The input ranges shall not overlap either output range. + * + * The following code snippet demonstrates how to use \p reduce_by_key to + * compact a sequence of key/value pairs and sum values with equal keys. + * + * \code + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values + * int C[N]; // output keys + * int D[N]; // output values + * + * thrust::pair new_end; + * new_end = thrust::reduce_by_key(A, A + N, B, C, D); + * + * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. + * // The first four values in D are now {9, 21, 9, 3} and new_end.second - D is 4. 
+ * \endcode + * + * \see reduce + * \see unique_copy + * \see unique_by_key + * \see unique_by_key_copy + */ +template + thrust::pair + reduce_by_key(InputIterator1 keys_first, + InputIterator1 keys_last, + InputIterator2 values_first, + OutputIterator1 keys_output, + OutputIterator2 values_output); + + +/*! \p reduce_by_key is a generalization of \p reduce to key-value pairs. + * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p reduce_by_key copies the first element of the group to the + * \c keys_output. The corresponding values in the range are reduced using the + * \c plus and the result copied to \c values_output. + * + * This version of \p reduce_by_key uses the function object \c binary_pred + * to test for equality and \c plus to reduce values with equal keys. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first The beginning of the input key range. + * \param keys_last The end of the input key range. + * \param values_first The beginning of the input value range. + * \param keys_output The beginning of the output key range. + * \param values_output The beginning of the output value range. + * \param binary_pred The binary predicate used to determine equality. + * \return A pair of iterators at end of the ranges [keys_output, keys_output_last) and [values_output, values_output_last). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputIterator1 is a model of Output Iterator and + * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. + * \tparam OutputIterator2 is a model of Output Iterator and + * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. 
+ * \tparam BinaryPredicate is a model of Binary Predicate. + * + * \pre The input ranges shall not overlap either output range. + * + * The following code snippet demonstrates how to use \p reduce_by_key to + * compact a sequence of key/value pairs and sum values with equal keys using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values + * int C[N]; // output keys + * int D[N]; // output values + * + * thrust::pair new_end; + * thrust::equal_to binary_pred; + * new_end = thrust::reduce_by_key(thrust::host, A, A + N, B, C, D, binary_pred); + * + * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. + * // The first four values in D are now {9, 21, 9, 3} and new_end.second - D is 4. + * \endcode + * + * \see reduce + * \see unique_copy + * \see unique_by_key + * \see unique_by_key_copy + */ +template +__host__ __device__ + thrust::pair + reduce_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 keys_first, + InputIterator1 keys_last, + InputIterator2 values_first, + OutputIterator1 keys_output, + OutputIterator2 values_output, + BinaryPredicate binary_pred); + + +/*! \p reduce_by_key is a generalization of \p reduce to key-value pairs. + * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p reduce_by_key copies the first element of the group to the + * \c keys_output. The corresponding values in the range are reduced using the + * \c plus and the result copied to \c values_output. + * + * This version of \p reduce_by_key uses the function object \c binary_pred + * to test for equality and \c plus to reduce values with equal keys. + * + * \param keys_first The beginning of the input key range. + * \param keys_last The end of the input key range. 
+ * \param values_first The beginning of the input value range. + * \param keys_output The beginning of the output key range. + * \param values_output The beginning of the output value range. + * \param binary_pred The binary predicate used to determine equality. + * \return A pair of iterators at end of the ranges [keys_output, keys_output_last) and [values_output, values_output_last). + * + * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputIterator1 is a model of Output Iterator and + * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. + * \tparam OutputIterator2 is a model of Output Iterator and + * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * \pre The input ranges shall not overlap either output range. + * + * The following code snippet demonstrates how to use \p reduce_by_key to + * compact a sequence of key/value pairs and sum values with equal keys. + * + * \code + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values + * int C[N]; // output keys + * int D[N]; // output values + * + * thrust::pair new_end; + * thrust::equal_to binary_pred; + * new_end = thrust::reduce_by_key(A, A + N, B, C, D, binary_pred); + * + * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. + * // The first four values in D are now {9, 21, 9, 3} and new_end.second - D is 4. + * \endcode + * + * \see reduce + * \see unique_copy + * \see unique_by_key + * \see unique_by_key_copy + */ +template + thrust::pair + reduce_by_key(InputIterator1 keys_first, + InputIterator1 keys_last, + InputIterator2 values_first, + OutputIterator1 keys_output, + OutputIterator2 values_output, + BinaryPredicate binary_pred); + + +/*! 
\p reduce_by_key is a generalization of \p reduce to key-value pairs. + * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p reduce_by_key copies the first element of the group to the + * \c keys_output. The corresponding values in the range are reduced using the + * \c BinaryFunction \c binary_op and the result copied to \c values_output. + * Specifically, if consecutive key iterators \c i and \c (i + 1) are + * such that binary_pred(*i, *(i+1)) is \c true, then the corresponding + * values are reduced to a single value with \c binary_op. + * + * This version of \p reduce_by_key uses the function object \c binary_pred + * to test for equality and \c binary_op to reduce values with equal keys. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first The beginning of the input key range. + * \param keys_last The end of the input key range. + * \param values_first The beginning of the input value range. + * \param keys_output The beginning of the output key range. + * \param values_output The beginning of the output value range. + * \param binary_pred The binary predicate used to determine equality. + * \param binary_op The binary function used to accumulate values. + * \return A pair of iterators at end of the ranges [keys_output, keys_output_last) and [values_output, values_output_last). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputIterator1 is a model of Output Iterator and + * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. + * \tparam OutputIterator2 is a model of Output Iterator and + * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. 
+ * \tparam BinaryPredicate is a model of Binary Predicate.
+ * \tparam BinaryFunction is a model of Binary Function
+ * and \c BinaryFunction's \c result_type is convertible to \c OutputIterator2's \c value_type.
+ *
+ * \pre The input ranges shall not overlap either output range.
+ *
+ * The following code snippet demonstrates how to use \p reduce_by_key to
+ * compact a sequence of key/value pairs and sum values with equal keys using the \p thrust::host
+ * execution policy for parallelization:
+ *
+ * \code
+ * #include <thrust/reduce.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * const int N = 7;
+ * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys
+ * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values
+ * int C[N]; // output keys
+ * int D[N]; // output values
+ *
+ * thrust::pair<int*,int*> new_end;
+ * thrust::equal_to<int> binary_pred;
+ * thrust::plus<int> binary_op;
+ * new_end = thrust::reduce_by_key(thrust::host, A, A + N, B, C, D, binary_pred, binary_op);
+ *
+ * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4.
+ * // The first four values in D are now {9, 21, 9, 3} and new_end.second - D is 4.
+ * \endcode
+ *
+ * \see reduce
+ * \see unique_copy
+ * \see unique_by_key
+ * \see unique_by_key_copy
+ */
+template<typename DerivedPolicy,
+         typename InputIterator1,
+         typename InputIterator2,
+         typename OutputIterator1,
+         typename OutputIterator2,
+         typename BinaryPredicate,
+         typename BinaryFunction>
+__host__ __device__
+  thrust::pair<OutputIterator1,OutputIterator2>
+  reduce_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                InputIterator1 keys_first,
+                InputIterator1 keys_last,
+                InputIterator2 values_first,
+                OutputIterator1 keys_output,
+                OutputIterator2 values_output,
+                BinaryPredicate binary_pred,
+                BinaryFunction binary_op);
+
+
+/*! \p reduce_by_key is a generalization of \p reduce to key-value pairs.
+ * For each group of consecutive keys in the range [keys_first, keys_last)
+ * that are equal, \p reduce_by_key copies the first element of the group to the
+ * \c keys_output. The corresponding values in the range are reduced using the
+ * \c BinaryFunction \c binary_op and the result copied to \c values_output.
+ * Specifically, if consecutive key iterators \c i and \c (i + 1) are + * such that binary_pred(*i, *(i+1)) is \c true, then the corresponding + * values are reduced to a single value with \c binary_op. + * + * This version of \p reduce_by_key uses the function object \c binary_pred + * to test for equality and \c binary_op to reduce values with equal keys. + * + * \param keys_first The beginning of the input key range. + * \param keys_last The end of the input key range. + * \param values_first The beginning of the input value range. + * \param keys_output The beginning of the output key range. + * \param values_output The beginning of the output value range. + * \param binary_pred The binary predicate used to determine equality. + * \param binary_op The binary function used to accumulate values. + * \return A pair of iterators at end of the ranges [keys_output, keys_output_last) and [values_output, values_output_last). + * + * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputIterator1 is a model of Output Iterator and + * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. + * \tparam OutputIterator2 is a model of Output Iterator and + * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * \tparam BinaryFunction is a model of Binary Function + * and \c BinaryFunction's \c result_type is convertible to \c OutputIterator2's \c value_type. + * + * \pre The input ranges shall not overlap either output range. + * + * The following code snippet demonstrates how to use \p reduce_by_key to + * compact a sequence of key/value pairs and sum values with equal keys. + * + * \code + * #include + * ... 
+ * const int N = 7;
+ * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys
+ * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values
+ * int C[N]; // output keys
+ * int D[N]; // output values
+ *
+ * thrust::pair<int*,int*> new_end;
+ * thrust::equal_to<int> binary_pred;
+ * thrust::plus<int> binary_op;
+ * new_end = thrust::reduce_by_key(A, A + N, B, C, D, binary_pred, binary_op);
+ *
+ * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4.
+ * // The first four values in D are now {9, 21, 9, 3} and new_end.second - D is 4.
+ * \endcode
+ *
+ * \see reduce
+ * \see unique_copy
+ * \see unique_by_key
+ * \see unique_by_key_copy
+ */
+template<typename InputIterator1,
+         typename InputIterator2,
+         typename OutputIterator1,
+         typename OutputIterator2,
+         typename BinaryPredicate,
+         typename BinaryFunction>
+  thrust::pair<OutputIterator1,OutputIterator2>
+  reduce_by_key(InputIterator1 keys_first,
+                InputIterator1 keys_last,
+                InputIterator2 values_first,
+                OutputIterator1 keys_output,
+                OutputIterator2 values_output,
+                BinaryPredicate binary_pred,
+                BinaryFunction binary_op);
+
+
+/*! \} // end reductions
+ */
+
+THRUST_NAMESPACE_END
+
+#include <thrust/detail/reduce.inl>
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/remove.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/remove.h
new file mode 100644
index 0000000000000000000000000000000000000000..a57fcf211cf3739d9e1fceb478d9ec3e5c4a0881
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/remove.h
@@ -0,0 +1,802 @@
+/*
+ * Copyright 2008-2013 NVIDIA Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*! \file remove.h
+ * \brief Functions for removing elements from a range
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+#include <thrust/detail/execution_policy.h>
+
+THRUST_NAMESPACE_BEGIN
+
+/*! \addtogroup stream_compaction Stream Compaction
+ * \ingroup reordering
+ * \{
+ *
+ */
+
+
+/*! \p remove removes from the range [first, last) all elements that are
+ * equal to \p value. That is, \p remove returns an iterator \p new_last such
+ * that the range [first, new_last) contains no elements equal to
+ * \p value. The iterators in the range [new_first,last) are all still
+ * dereferenceable, but the elements that they point to are unspecified. \p remove
+ * is stable, meaning that the relative order of elements that are not equal to
+ * \p value is unchanged.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the range of interest.
+ * \param last The end of the range of interest.
+ * \param value The value to remove from the range [first, last).
+ * Elements which are equal to value are removed from the sequence.
+ * \return A \p ForwardIterator pointing to the end of the resulting range of
+ * elements which are not equal to \p value.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator,
+ * and \p ForwardIterator is mutable.
+ * \tparam T is a model of Equality Comparable,
+ * and objects of type \p T can be compared for equality with objects of \p ForwardIterator's \c value_type.
+ *
+ * The following code snippet demonstrates how to use \p remove to remove a number
+ * of interest from a range using the \p thrust::host execution policy for parallelization:
+ *
+ * \code
+ * #include <thrust/remove.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * const int N = 6;
+ * int A[N] = {3, 1, 4, 1, 5, 9};
+ * int *new_end = thrust::remove(thrust::host, A, A + N, 1);
+ * // The first four values of A are now {3, 4, 5, 9}
+ * // Values beyond new_end are unspecified
+ * \endcode
+ *
+ * \note The meaning of "removal" is somewhat subtle. \p remove does not destroy any
+ * iterators, and does not change the distance between \p first and \p last.
+ * (There's no way that it could do anything of the sort.) So, for example, if
+ * \c V is a device_vector, remove(V.begin(), V.end(), 0) does not
+ * change V.size(): \c V will contain just as many elements as it did
+ * before. \p remove returns an iterator that points to the end of the resulting
+ * range after elements have been removed from it; it follows that the elements
+ * after that iterator are of no interest, and may be discarded. If you are
+ * removing elements from a
+ * Sequence, you may
+ * simply erase them. That is, a reasonable way of removing elements from a
+ * Sequence is
+ * S.erase(remove(S.begin(), S.end(), x), S.end()).
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/remove
+ * \see remove_if
+ * \see remove_copy
+ * \see remove_copy_if
+ */
+template<typename DerivedPolicy,
+         typename ForwardIterator,
+         typename T>
+__host__ __device__
+  ForwardIterator remove(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                         ForwardIterator first,
+                         ForwardIterator last,
+                         const T &value);
+
+
+/*! \p remove removes from the range [first, last) all elements that are
+ * equal to \p value. That is, \p remove returns an iterator \p new_last such
+ * that the range [first, new_last) contains no elements equal to
+ * \p value. The iterators in the range [new_first,last) are all still
+ * dereferenceable, but the elements that they point to are unspecified. \p remove
+ * is stable, meaning that the relative order of elements that are not equal to
+ * \p value is unchanged.
+ *
+ * \param first The beginning of the range of interest.
+ * \param last The end of the range of interest.
+ * \param value The value to remove from the range [first, last).
+ * Elements which are equal to value are removed from the sequence.
+ * \return A \p ForwardIterator pointing to the end of the resulting range of
+ * elements which are not equal to \p value.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator,
+ * and \p ForwardIterator is mutable.
+ * \tparam T is a model of Equality Comparable,
+ * and objects of type \p T can be compared for equality with objects of \p ForwardIterator's \c value_type.
+ *
+ * The following code snippet demonstrates how to use \p remove to remove a number
+ * of interest from a range.
+ *
+ * \code
+ * #include <thrust/remove.h>
+ * ...
+ * const int N = 6;
+ * int A[N] = {3, 1, 4, 1, 5, 9};
+ * int *new_end = thrust::remove(A, A + N, 1);
+ * // The first four values of A are now {3, 4, 5, 9}
+ * // Values beyond new_end are unspecified
+ * \endcode
+ *
+ * \note The meaning of "removal" is somewhat subtle. \p remove does not destroy any
+ * iterators, and does not change the distance between \p first and \p last.
+ * (There's no way that it could do anything of the sort.) So, for example, if
+ * \c V is a device_vector, remove(V.begin(), V.end(), 0) does not
+ * change V.size(): \c V will contain just as many elements as it did
+ * before. \p remove returns an iterator that points to the end of the resulting
+ * range after elements have been removed from it; it follows that the elements
+ * after that iterator are of no interest, and may be discarded. If you are
+ * removing elements from a
+ * Sequence, you may
+ * simply erase them. That is, a reasonable way of removing elements from a
+ * Sequence is
+ * S.erase(remove(S.begin(), S.end(), x), S.end()).
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/remove
+ * \see remove_if
+ * \see remove_copy
+ * \see remove_copy_if
+ */
+template<typename ForwardIterator,
+         typename T>
+  ForwardIterator remove(ForwardIterator first,
+                         ForwardIterator last,
+                         const T &value);
+
+
+/*!
\p remove_copy copies elements that are not equal to \p value from the range + * [first, last) to a range beginning at \p result. The return value is + * the end of the resulting range. This operation is stable, meaning that the + * relative order of the elements that are copied is the same as in + * the range [first, last). + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the range of interest. + * \param last The end of the range of interest. + * \param result The resulting range is copied to the sequence beginning at this + * location. + * \param value The value to omit from the copied range. + * \return An OutputIterator pointing to the end of the resulting range of elements + * which are not equal to \p value. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam T is a model of Equality Comparable, + * and objects of type \p T can be compared for equality with objects of \p InputIterator's \c value_type. + * + * \pre The range [first, last) shall not overlap the range [result, result + (last - first)). + * + * The following code snippet demonstrates how to use \p remove_copy to copy + * a sequence of numbers to an output range while omitting a value of interest using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * ... 
+ * const int N = 6;
+ * int V[N] = {-2, 0, -1, 0, 1, 2};
+ * int result[N-2];
+ * thrust::remove_copy(thrust::host, V, V + N, result, 0);
+ * // V remains {-2, 0, -1, 0, 1, 2}
+ * // result is now {-2, -1, 1, 2}
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/remove_copy
+ * \see remove
+ * \see remove_if
+ * \see remove_copy_if
+ */
+template<typename DerivedPolicy,
+         typename InputIterator,
+         typename OutputIterator,
+         typename T>
+__host__ __device__
+  OutputIterator remove_copy(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                             InputIterator first,
+                             InputIterator last,
+                             OutputIterator result,
+                             const T &value);
+
+
+/*! \p remove_copy copies elements that are not equal to \p value from the range
+ * [first, last) to a range beginning at \p result. The return value is
+ * the end of the resulting range. This operation is stable, meaning that the
+ * relative order of the elements that are copied is the same as in
+ * the range [first, last).
+ *
+ * \param first The beginning of the range of interest.
+ * \param last The end of the range of interest.
+ * \param result The resulting range is copied to the sequence beginning at this
+ * location.
+ * \param value The value to omit from the copied range.
+ * \return An OutputIterator pointing to the end of the resulting range of elements
+ * which are not equal to \p value.
+ *
+ * \tparam InputIterator is a model of Input Iterator,
+ * and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam OutputIterator is a model of Output Iterator.
+ * \tparam T is a model of Equality Comparable,
+ * and objects of type \p T can be compared for equality with objects of \p InputIterator's \c value_type.
+ *
+ * \pre The range [first, last) shall not overlap the range [result, result + (last - first)).
+ *
+ * The following code snippet demonstrates how to use \p remove_copy to copy
+ * a sequence of numbers to an output range while omitting a value of interest.
+ *
+ * \code
+ * #include <thrust/remove.h>
+ * ...
+ * const int N = 6;
+ * int V[N] = {-2, 0, -1, 0, 1, 2};
+ * int result[N-2];
+ * thrust::remove_copy(V, V + N, result, 0);
+ * // V remains {-2, 0, -1, 0, 1, 2}
+ * // result is now {-2, -1, 1, 2}
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/remove_copy
+ * \see remove
+ * \see remove_if
+ * \see remove_copy_if
+ */
+template<typename InputIterator,
+         typename OutputIterator,
+         typename T>
+  OutputIterator remove_copy(InputIterator first,
+                             InputIterator last,
+                             OutputIterator result,
+                             const T &value);
+
+
+/*! \p remove_if removes from the range [first, last) every element \p x
+ * such that pred(x) is \c true. That is, \p remove_if returns an
+ * iterator \c new_last such that the range [first,new_last) contains
+ * no elements for which \p pred is \c true. The iterators in the range
+ * [new_last,last) are all still dereferenceable, but the elements that
+ * they point to are unspecified. \p remove_if is stable, meaning that the
+ * relative order of elements that are not removed is unchanged.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the range of interest.
+ * \param last The end of the range of interest.
+ * \param pred A predicate to evaluate for each element of the range
+ * [first,last). Elements for which \p pred evaluates to
+ * \c true are removed from the sequence.
+ * \return A ForwardIterator pointing to the end of the resulting range of
+ * elements for which \p pred evaluated to \c true.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator,
+ * \p ForwardIterator is mutable,
+ * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type.
+ * \tparam Predicate is a model of Predicate.
+ *
+ * The following code snippet demonstrates how to use \p remove_if to remove
+ * all even numbers from an array of integers using the \p thrust::host execution policy for
+ * parallelization:
+ *
+ * \code
+ * #include <thrust/remove.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * struct is_even
+ * {
+ *   __host__ __device__
+ *   bool operator()(const int x)
+ *   {
+ *     return (x % 2) == 0;
+ *   }
+ * };
+ * ...
+ * const int N = 6;
+ * int A[N] = {1, 4, 2, 8, 5, 7};
+ * int *new_end = thrust::remove_if(thrust::host, A, A + N, is_even());
+ * // The first three values of A are now {1, 5, 7}
+ * // Values beyond new_end are unspecified
+ * \endcode
+ *
+ * \note The meaning of "removal" is somewhat subtle. \p remove_if does not
+ * destroy any iterators, and does not change the distance between \p first and
+ * \p last. (There's no way that it could do anything of the sort.) So, for
+ * example, if \c V is a device_vector,
+ * remove_if(V.begin(), V.end(), pred) does not change
+ * V.size(): \c V will contain just as many elements as it did before.
+ * \p remove_if returns an iterator that points to the end of the resulting
+ * range after elements have been removed from it; it follows that the elements
+ * after that iterator are of no interest, and may be discarded. If you are
+ * removing elements from a
+ * Sequence, you may
+ * simply erase them. That is, a reasonable way of removing elements from a
+ * Sequence is
+ * S.erase(remove_if(S.begin(), S.end(), pred), S.end()).
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/remove
+ * \see remove
+ * \see remove_copy
+ * \see remove_copy_if
+ */
+template<typename DerivedPolicy,
+         typename ForwardIterator,
+         typename Predicate>
+__host__ __device__
+  ForwardIterator remove_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                            ForwardIterator first,
+                            ForwardIterator last,
+                            Predicate pred);
+
+
+/*! \p remove_if removes from the range [first, last) every element \p x
+ * such that pred(x) is \c true.
That is, \p remove_if returns an + * iterator \c new_last such that the range [first,new_last) contains + * no elements for which \p pred is \c true. The iterators in the range + * [new_last,last) are all still dereferenceable, but the elements that + * they point to are unspecified. \p remove_if is stable, meaning that the + * relative order of elements that are not removed is unchanged. + * + * \param first The beginning of the range of interest. + * \param last The end of the range of interest. + * \param pred A predicate to evaluate for each element of the range + * [first,last). Elements for which \p pred evaluates to + * \c true are removed from the sequence. + * \return A ForwardIterator pointing to the end of the resulting range of + * elements for which \p pred evaluated to \c true. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * \p ForwardIterator is mutable, + * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * + * The following code snippet demonstrates how to use \p remove_if to remove + * all even numbers from an array of integers. + * + * \code + * #include + * ... + * struct is_even + * { + * __host__ __device__ + * bool operator()(const int x) + * { + * return (x % 2) == 0; + * } + * }; + * ... + * const int N = 6; + * int A[N] = {1, 4, 2, 8, 5, 7}; + * int *new_end = thrust::remove_if(A, A + N, is_even()); + * // The first three values of A are now {1, 5, 7} + * // Values beyond new_end are unspecified + * \endcode + * + * \note The meaning of "removal" is somewhat subtle. \p remove_if does not + * destroy any iterators, and does not change the distance between \p first and + * \p last. (There's no way that it could do anything of the sort.) So, for + * example, if \c V is a device_vector, + * remove_if(V.begin(), V.end(), pred) does not change + * V.size(): \c V will contain just as many elements as it did before. 
+ * \p remove_if returns an iterator that points to the end of the resulting
+ * range after elements have been removed from it; it follows that the elements
+ * after that iterator are of no interest, and may be discarded. If you are
+ * removing elements from a
+ * Sequence, you may
+ * simply erase them. That is, a reasonable way of removing elements from a
+ * Sequence is
+ * S.erase(remove_if(S.begin(), S.end(), pred), S.end()).
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/remove
+ * \see remove
+ * \see remove_copy
+ * \see remove_copy_if
+ */
+template<typename ForwardIterator,
+         typename Predicate>
+  ForwardIterator remove_if(ForwardIterator first,
+                            ForwardIterator last,
+                            Predicate pred);
+
+
+/*! \p remove_copy_if copies elements from the range [first,last) to a
+ * range beginning at \p result, except that elements for which \p pred is
+ * \c true are not copied. The return value is the end of the resulting range.
+ * This operation is stable, meaning that the relative order of the elements that
+ * are copied is the same as the range [first,last).
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the range of interest.
+ * \param last The end of the range of interest.
+ * \param result The resulting range is copied to the sequence beginning at this
+ * location.
+ * \param pred A predicate to evaluate for each element of the range [first,last).
+ * Elements for which \p pred evaluates to \c false are not copied
+ * to the resulting sequence.
+ * \return An OutputIterator pointing to the end of the resulting range.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam InputIterator is a model of Input Iterator,
+ * \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types,
+ * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type.
+ * \tparam OutputIterator is a model of Output Iterator.
+ * \tparam Predicate is a model of Predicate.
+ *
+ * \pre The range [first, last) shall not overlap the range [result, result + (last - first)).
+ *
+ * The following code snippet demonstrates how to use \p remove_copy_if to copy
+ * a sequence of numbers to an output range while omitting even numbers using the \p thrust::host
+ * execution policy for parallelization:
+ *
+ * \code
+ * #include <thrust/remove.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * struct is_even
+ * {
+ *   __host__ __device__
+ *   bool operator()(const int x)
+ *   {
+ *     return (x % 2) == 0;
+ *   }
+ * };
+ * ...
+ * const int N = 6;
+ * int V[N] = {-2, 0, -1, 0, 1, 2};
+ * int result[2];
+ * thrust::remove_copy_if(thrust::host, V, V + N, result, is_even());
+ * // V remains {-2, 0, -1, 0, 1, 2}
+ * // result is now {-1, 1}
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/remove_copy
+ * \see remove
+ * \see remove_copy
+ * \see remove_if
+ */
+template<typename DerivedPolicy,
+         typename InputIterator,
+         typename OutputIterator,
+         typename Predicate>
+__host__ __device__
+  OutputIterator remove_copy_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                                InputIterator first,
+                                InputIterator last,
+                                OutputIterator result,
+                                Predicate pred);
+
+
+/*! \p remove_copy_if copies elements from the range [first,last) to a
+ * range beginning at \p result, except that elements for which \p pred is
+ * \c true are not copied. The return value is the end of the resulting range.
+ * This operation is stable, meaning that the relative order of the elements that
+ * are copied is the same as the range [first,last).
+ *
+ * \param first The beginning of the range of interest.
+ * \param last The end of the range of interest.
+ * \param result The resulting range is copied to the sequence beginning at this
+ * location.
+ * \param pred A predicate to evaluate for each element of the range [first,last).
+ * Elements for which \p pred evaluates to \c false are not copied
+ * to the resulting sequence.
+ * \return An OutputIterator pointing to the end of the resulting range.
+ *
+ * \tparam InputIterator is a model of Input Iterator,
+ * \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types,
+ * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type.
+ * \tparam OutputIterator is a model of Output Iterator.
+ * \tparam Predicate is a model of Predicate.
+ *
+ * \pre The range [first, last) shall not overlap the range [result, result + (last - first)).
+ *
+ * The following code snippet demonstrates how to use \p remove_copy_if to copy
+ * a sequence of numbers to an output range while omitting even numbers.
+ *
+ * \code
+ * #include <thrust/remove.h>
+ * ...
+ * struct is_even
+ * {
+ *   __host__ __device__
+ *   bool operator()(const int x)
+ *   {
+ *     return (x % 2) == 0;
+ *   }
+ * };
+ * ...
+ * const int N = 6;
+ * int V[N] = {-2, 0, -1, 0, 1, 2};
+ * int result[2];
+ * thrust::remove_copy_if(V, V + N, result, is_even());
+ * // V remains {-2, 0, -1, 0, 1, 2}
+ * // result is now {-1, 1}
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/remove_copy
+ * \see remove
+ * \see remove_copy
+ * \see remove_if
+ */
+template<typename InputIterator,
+         typename OutputIterator,
+         typename Predicate>
+  OutputIterator remove_copy_if(InputIterator first,
+                                InputIterator last,
+                                OutputIterator result,
+                                Predicate pred);
+
+
+/*! \p remove_if removes from the range [first, last) every element \p x
+ * such that pred(x) is \c true. That is, \p remove_if returns an
+ * iterator \c new_last such that the range [first, new_last) contains
+ * no elements for which \p pred of the corresponding stencil value is \c true.
+ * The iterators in the range [new_last,last) are all still dereferenceable,
+ * but the elements that they point to are unspecified. \p remove_if is stable,
+ * meaning that the relative order of elements that are not removed is unchanged.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the range of interest.
+ * \param last The end of the range of interest.
+ * \param stencil The beginning of the stencil sequence.
+ * \param pred A predicate to evaluate for each element of the range
+ * [stencil, stencil + (last - first)). Elements for which \p pred evaluates to
+ * \c true are removed from the sequence [first, last)
+ * \return A ForwardIterator pointing to the end of the resulting range of
+ * elements for which \p pred evaluated to \c true.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator
+ * and \p ForwardIterator is mutable.
+ * \tparam InputIterator is a model of Input Iterator,
+ * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type.
+ * \tparam Predicate is a model of Predicate.
+ *
+ * \pre The range [first, last) shall not overlap the range [result, result + (last - first)).
+ * \pre The range [stencil, stencil + (last - first)) shall not overlap the range [result, result + (last - first)).
+ *
+ * The following code snippet demonstrates how to use \p remove_if to remove
+ * specific elements from an array of integers using the \p thrust::host execution policy for
+ * parallelization:
+ *
+ * \code
+ * #include <thrust/remove.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * const int N = 6;
+ * int A[N] = {1, 4, 2, 8, 5, 7};
+ * int S[N] = {0, 1, 1, 1, 0, 0};
+ *
+ * int *new_end = thrust::remove_if(thrust::host, A, A + N, S, thrust::identity<int>());
+ * // The first three values of A are now {1, 5, 7}
+ * // Values beyond new_end are unspecified
+ * \endcode
+ *
+ * \note The range [first, last) is not permitted to overlap with the range [stencil, stencil + (last - first)).
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/remove
+ * \see remove
+ * \see remove_copy
+ * \see remove_copy_if
+ */
+template<typename DerivedPolicy,
+         typename ForwardIterator,
+         typename InputIterator,
+         typename Predicate>
+__host__ __device__
+  ForwardIterator remove_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                            ForwardIterator first,
+                            ForwardIterator last,
+                            InputIterator stencil,
+                            Predicate pred);
+
+
+/*! \p remove_if removes from the range [first, last) every element \p x
+ * such that pred(x) is \c true. That is, \p remove_if returns an
+ * iterator \c new_last such that the range [first, new_last) contains
+ * no elements for which \p pred of the corresponding stencil value is \c true.
+ * The iterators in the range [new_last,last) are all still dereferenceable,
+ * but the elements that they point to are unspecified. \p remove_if is stable,
+ * meaning that the relative order of elements that are not removed is unchanged.
+ *
+ * \param first The beginning of the range of interest.
+ * \param last The end of the range of interest.
+ * \param stencil The beginning of the stencil sequence.
+ * \param pred A predicate to evaluate for each element of the range
+ * [stencil, stencil + (last - first)). Elements for which \p pred evaluates to
+ * \c true are removed from the sequence [first, last)
+ * \return A ForwardIterator pointing to the end of the resulting range of
+ * elements for which \p pred evaluated to \c true.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator
+ * and \p ForwardIterator is mutable.
+ * \tparam InputIterator is a model of Input Iterator,
+ * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type.
+ * \tparam Predicate is a model of Predicate.
+ *
+ * \pre The range [first, last) shall not overlap the range [result, result + (last - first)).
+ * \pre The range [stencil, stencil + (last - first)) shall not overlap the range [result, result + (last - first)).
+ *
+ * The following code snippet demonstrates how to use \p remove_if to remove
+ * specific elements from an array of integers.
+ *
+ * \code
+ * #include <thrust/remove.h>
+ * ...
+ * const int N = 6;
+ * int A[N] = {1, 4, 2, 8, 5, 7};
+ * int S[N] = {0, 1, 1, 1, 0, 0};
+ *
+ * int *new_end = thrust::remove_if(A, A + N, S, thrust::identity<int>());
+ * // The first three values of A are now {1, 5, 7}
+ * // Values beyond new_end are unspecified
+ * \endcode
+ *
+ * \note The range [first, last) is not permitted to overlap with the range [stencil, stencil + (last - first)).
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/remove
+ * \see remove
+ * \see remove_copy
+ * \see remove_copy_if
+ */
+template<typename ForwardIterator, typename InputIterator, typename Predicate>
+  ForwardIterator remove_if(ForwardIterator first,
+                            ForwardIterator last,
+                            InputIterator stencil,
+                            Predicate pred);
+
+
+/*! \p remove_copy_if copies elements from the range [first,last) to a
+ * range beginning at \p result, except that elements for which \p pred of the
+ * corresponding stencil value is \c true are not copied. The return value is
+ * the end of the resulting range. This operation is stable, meaning that the
+ * relative order of the elements that are copied is the same as the
+ * range [first,last).
+ *
+ * The algorithm's execution policy is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the range of interest.
+ * \param last The end of the range of interest.
+ * \param stencil The beginning of the stencil sequence.
+ * \param result The resulting range is copied to the sequence beginning at this
+ *               location.
+ * \param pred A predicate to evaluate for each element of the range [first,last).
+ *             Elements for which \p pred evaluates to \c false are not copied
+ *             to the resulting sequence.
+ * \return An OutputIterator pointing to the end of the resulting range.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The range [stencil, stencil + (last - first)) shall not overlap the range [result, result + (last - first)). + * + * The following code snippet demonstrates how to use \p remove_copy_if to copy + * a sequence of numbers to an output range while omitting specific elements using the \p thrust::host + * execution policy for parallelization. + * + * \code + * #include + * #include + * ... + * const int N = 6; + * int V[N] = {-2, 0, -1, 0, 1, 2}; + * int S[N] = { 1, 1, 0, 1, 0, 1}; + * int result[2]; + * thrust::remove_copy_if(thrust::host, V, V + N, S, result, thrust::identity()); + * // V remains {-2, 0, -1, 0, 1, 2} + * // result is now {-1, 1} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/remove_copy + * \see remove + * \see remove_copy + * \see remove_if + * \see copy_if + */ +template +__host__ __device__ + OutputIterator remove_copy_if(const thrust::detail::execution_policy_base &exec, + InputIterator1 first, + InputIterator1 last, + InputIterator2 stencil, + OutputIterator result, + Predicate pred); + + +/*! \p remove_copy_if copies elements from the range [first,last) to a + * range beginning at \p result, except that elements for which \p pred of the + * corresponding stencil value is \c true are not copied. The return value is + * the end of the resulting range. This operation is stable, meaning that the + * relative order of the elements that are copied is the same as the + * range [first,last). + * + * \param first The beginning of the range of interest. + * \param last The end of the range of interest. 
+ * \param stencil The beginning of the stencil sequence. + * \param result The resulting range is copied to the sequence beginning at this + * location. + * \param pred A predicate to evaluate for each element of the range [first,last). + * Elements for which \p pred evaluates to \c false are not copied + * to the resulting sequence. + * \return An OutputIterator pointing to the end of the resulting range. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * + * \pre The range [stencil, stencil + (last - first)) shall not overlap the range [result, result + (last - first)). + * + * The following code snippet demonstrates how to use \p remove_copy_if to copy + * a sequence of numbers to an output range while omitting specific elements. + * + * \code + * #include + * ... + * const int N = 6; + * int V[N] = {-2, 0, -1, 0, 1, 2}; + * int S[N] = { 1, 1, 0, 1, 0, 1}; + * int result[2]; + * thrust::remove_copy_if(V, V + N, S, result, thrust::identity()); + * // V remains {-2, 0, -1, 0, 1, 2} + * // result is now {-1, 1} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/remove_copy + * \see remove + * \see remove_copy + * \see remove_if + * \see copy_if + */ +template + OutputIterator remove_copy_if(InputIterator1 first, + InputIterator1 last, + InputIterator2 stencil, + OutputIterator result, + Predicate pred); + + +/*! 
+ \} // end stream_compaction
+ */
+
+THRUST_NAMESPACE_END
+
+#include <thrust/detail/remove.inl>
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/replace.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/replace.h
new file mode 100644
index 0000000000000000000000000000000000000000..a5c0320c423a1254853394ae1b90023ac836a4c5
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/replace.h
@@ -0,0 +1,819 @@
+/*
+ * Copyright 2008-2013 NVIDIA Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*! \file replace.h
+ * \brief Functions for replacing elements in a range with a particular value
+ */
+
+#pragma once
+
+#include <thrust/detail/config.h>
+#include <thrust/detail/execution_policy.h>
+
+THRUST_NAMESPACE_BEGIN
+
+/*! \addtogroup transformations
+ * \addtogroup replacing
+ * \ingroup transformations
+ * \{
+ */
+
+
+/*! \p replace replaces every element in the range [first, last) equal to \p old_value
+ * with \p new_value. That is: for every iterator \c i, if *i == old_value
+ * then it performs the assignment *i = new_value.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param first The beginning of the sequence of interest.
+ * \param last The end of the sequence of interest.
+ * \param old_value The value to replace.
+ * \param new_value The new value to replace \p old_value.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam ForwardIterator is a model of Forward Iterator,
+ *         and \p ForwardIterator is mutable.
+ * \tparam T is a model of Assignable,
+ *         \p T is a model of EqualityComparable,
+ *         objects of \p T may be compared for equality with objects of
+ *         \p ForwardIterator's \c value_type,
+ *         and \p T is convertible to \p ForwardIterator's \c value_type.
+ *
+ * The following code snippet demonstrates how to use \p replace to replace
+ * a value of interest in a \c device_vector with another using the \p thrust::device
+ * execution policy for parallelization:
+ *
+ * \code
+ * #include <thrust/replace.h>
+ * #include <thrust/device_vector.h>
+ * #include <thrust/execution_policy.h>
+ *
+ * ...
+ *
+ * thrust::device_vector<int> A(4);
+ * A[0] = 1;
+ * A[1] = 2;
+ * A[2] = 3;
+ * A[3] = 1;
+ *
+ * thrust::replace(thrust::device, A.begin(), A.end(), 1, 99);
+ *
+ * // A contains [99, 2, 3, 99]
+ * \endcode
+ *
+ * \see https://en.cppreference.com/w/cpp/algorithm/replace
+ * \see \c replace_if
+ * \see \c replace_copy
+ * \see \c replace_copy_if
+ */
+template<typename DerivedPolicy, typename ForwardIterator, typename T>
+__host__ __device__
+  void replace(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+               ForwardIterator first, ForwardIterator last,
+               const T &old_value,
+               const T &new_value);
+
+
+/*! \p replace replaces every element in the range [first, last) equal to \p old_value
+ * with \p new_value. That is: for every iterator \c i, if *i == old_value
+ * then it performs the assignment *i = new_value.
+ *
+ * \param first The beginning of the sequence of interest.
+ * \param last The end of the sequence of interest.
+ * \param old_value The value to replace.
+ * \param new_value The new value to replace \p old_value.
+ *
+ * \tparam ForwardIterator is a model of Forward Iterator,
+ *         and \p ForwardIterator is mutable.
+ * \tparam T is a model of Assignable, + * \p T is a model of EqualityComparable, + * objects of \p T may be compared for equality with objects of + * \p ForwardIterator's \c value_type, + * and \p T is convertible to \p ForwardIterator's \c value_type. + * + * The following code snippet demonstrates how to use \p replace to replace + * a value of interest in a \c device_vector with another. + * + * \code + * #include + * #include + * + * ... + * + * thrust::device_vector A(4); + * A[0] = 1; + * A[1] = 2; + * A[2] = 3; + * A[3] = 1; + * + * thrust::replace(A.begin(), A.end(), 1, 99); + * + * // A contains [99, 2, 3, 99] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/replace + * \see \c replace_if + * \see \c replace_copy + * \see \c replace_copy_if + */ +template + void replace(ForwardIterator first, ForwardIterator last, const T &old_value, + const T &new_value); + + +/*! \p replace_if replaces every element in the range [first, last) for which + * \p pred returns \c true with \p new_value. That is: for every iterator \c i, if + * pred(*i) is \c true then it performs the assignment *i = new_value. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence of interest. + * \param last The end of the sequence of interest. + * \param pred The predicate to test on every value of the range [first,last). + * \param new_value The new value to replace elements which pred(*i) evaluates + * to \c true. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * \p ForwardIterator is mutable, + * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * \tparam T is a model of Assignable, + * and \p T is convertible to \p ForwardIterator's \c value_type. 
+ * + * The following code snippet demonstrates how to use \p replace_if to replace + * a \c device_vector's negative elements with \c 0 using the \p thrust::device execution policy + * for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * struct is_less_than_zero + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return x < 0; + * } + * }; + * + * ... + * + * thrust::device_vector A(4); + * A[0] = 1; + * A[1] = -3; + * A[2] = 2; + * A[3] = -1; + * + * is_less_than_zero pred; + * + * thrust::replace_if(thrust::device, A.begin(), A.end(), pred, 0); + * + * // A contains [1, 0, 2, 0] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/replace + * \see \c replace + * \see \c replace_copy + * \see \c replace_copy_if + */ +template +__host__ __device__ + void replace_if(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, ForwardIterator last, + Predicate pred, + const T &new_value); + + +/*! \p replace_if replaces every element in the range [first, last) for which + * \p pred returns \c true with \p new_value. That is: for every iterator \c i, if + * pred(*i) is \c true then it performs the assignment *i = new_value. + * + * \param first The beginning of the sequence of interest. + * \param last The end of the sequence of interest. + * \param pred The predicate to test on every value of the range [first,last). + * \param new_value The new value to replace elements which pred(*i) evaluates + * to \c true. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * \p ForwardIterator is mutable, + * and \p ForwardIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * \tparam T is a model of Assignable, + * and \p T is convertible to \p ForwardIterator's \c value_type. + * + * The following code snippet demonstrates how to use \p replace_if to replace + * a \c device_vector's negative elements with \c 0. 
+ * + * \code + * #include + * #include + * ... + * struct is_less_than_zero + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return x < 0; + * } + * }; + * + * ... + * + * thrust::device_vector A(4); + * A[0] = 1; + * A[1] = -3; + * A[2] = 2; + * A[3] = -1; + * + * is_less_than_zero pred; + * + * thrust::replace_if(A.begin(), A.end(), pred, 0); + * + * // A contains [1, 0, 2, 0] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/replace + * \see \c replace + * \see \c replace_copy + * \see \c replace_copy_if + */ +template + void replace_if(ForwardIterator first, ForwardIterator last, + Predicate pred, + const T &new_value); + + +/*! \p replace_if replaces every element in the range [first, last) for which + * pred(*s) returns \c true with \p new_value. That is: for every iterator + * \c i in the range [first, last), and \c s in the range [stencil, stencil + (last - first)), + * if pred(*s) is \c true then it performs the assignment *i = new_value. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence of interest. + * \param last The end of the sequence of interest. + * \param stencil The beginning of the stencil sequence. + * \param pred The predicate to test on every value of the range [first,last). + * \param new_value The new value to replace elements which pred(*i) evaluates + * to \c true. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * \tparam T is a model of Assignable, + * and \p T is convertible to \p ForwardIterator's \c value_type. 
+ * + * The following code snippet demonstrates how to use \p replace_if to replace + * a \c device_vector's element with \c 0 when its corresponding stencil element is less than zero + * using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * + * struct is_less_than_zero + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return x < 0; + * } + * }; + * + * ... + * + * thrust::device_vector A(4); + * A[0] = 10; + * A[1] = 20; + * A[2] = 30; + * A[3] = 40; + * + * thrust::device_vector S(4); + * S[0] = -1; + * S[1] = 0; + * S[2] = -1; + * S[3] = 0; + * + * is_less_than_zero pred; + * thrust::replace_if(thrust::device, A.begin(), A.end(), S.begin(), pred, 0); + * + * // A contains [0, 20, 0, 40] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/replace + * \see \c replace + * \see \c replace_copy + * \see \c replace_copy_if + */ +template +__host__ __device__ + void replace_if(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, ForwardIterator last, + InputIterator stencil, + Predicate pred, + const T &new_value); + + +/*! \p replace_if replaces every element in the range [first, last) for which + * pred(*s) returns \c true with \p new_value. That is: for every iterator + * \c i in the range [first, last), and \c s in the range [stencil, stencil + (last - first)), + * if pred(*s) is \c true then it performs the assignment *i = new_value. + * + * \param first The beginning of the sequence of interest. + * \param last The end of the sequence of interest. + * \param stencil The beginning of the stencil sequence. + * \param pred The predicate to test on every value of the range [first,last). + * \param new_value The new value to replace elements which pred(*i) evaluates + * to \c true. + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable. 
+ * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam Predicate is a model of Predicate. + * \tparam T is a model of Assignable, + * and \p T is convertible to \p ForwardIterator's \c value_type. + * + * The following code snippet demonstrates how to use \p replace_if to replace + * a \c device_vector's element with \c 0 when its corresponding stencil element is less than zero. + * + * \code + * #include + * #include + * + * struct is_less_than_zero + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return x < 0; + * } + * }; + * + * ... + * + * thrust::device_vector A(4); + * A[0] = 10; + * A[1] = 20; + * A[2] = 30; + * A[3] = 40; + * + * thrust::device_vector S(4); + * S[0] = -1; + * S[1] = 0; + * S[2] = -1; + * S[3] = 0; + * + * is_less_than_zero pred; + * thrust::replace_if(A.begin(), A.end(), S.begin(), pred, 0); + * + * // A contains [0, 20, 0, 40] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/replace + * \see \c replace + * \see \c replace_copy + * \see \c replace_copy_if + */ +template + void replace_if(ForwardIterator first, ForwardIterator last, + InputIterator stencil, + Predicate pred, + const T &new_value); + + +/*! \p replace_copy copies elements from the range [first, last) to the range + * [result, result + (last-first)), except that any element equal to \p old_value + * is not copied; \p new_value is copied instead. + * + * More precisely, for every integer \c n such that 0 <= n < last-first, \p replace_copy + * performs the assignment *(result+n) = new_value if *(first+n) == old_value, + * and *(result+n) = *(first+n) otherwise. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence to copy from. + * \param last The end of the sequence to copy from. 
+ * \param result The beginning of the sequence to copy to. + * \param old_value The value to replace. + * \param new_value The replacement value for which *i == old_value evaluates to \c true. + * \return result + (last-first) + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam T is a model of Assignable, + * \p T is a model of Equality Comparable, + * \p T may be compared for equality with \p InputIterator's \c value_type, + * and \p T is convertible to \p OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector A(4); + * A[0] = 1; + * A[1] = 2; + * A[2] = 3; + * A[3] = 1; + * + * thrust::device_vector B(4); + * + * thrust::replace_copy(thrust::device, A.begin(), A.end(), B.begin(), 1, 99); + * + * // B contains [99, 2, 3, 99] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/replace_copy + * \see \c copy + * \see \c replace + * \see \c replace_if + * \see \c replace_copy_if + */ +template +__host__ __device__ + OutputIterator replace_copy(const thrust::detail::execution_policy_base &exec, + InputIterator first, InputIterator last, + OutputIterator result, + const T &old_value, + const T &new_value); + + +/*! \p replace_copy copies elements from the range [first, last) to the range + * [result, result + (last-first)), except that any element equal to \p old_value + * is not copied; \p new_value is copied instead. + * + * More precisely, for every integer \c n such that 0 <= n < last-first, \p replace_copy + * performs the assignment *(result+n) = new_value if *(first+n) == old_value, + * and *(result+n) = *(first+n) otherwise. + * + * \param first The beginning of the sequence to copy from. 
+ * \param last The end of the sequence to copy from. + * \param result The beginning of the sequence to copy to. + * \param old_value The value to replace. + * \param new_value The replacement value for which *i == old_value evaluates to \c true. + * \return result + (last-first) + * + * \tparam InputIterator is a model of Input Iterator. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam T is a model of Assignable, + * \p T is a model of Equality Comparable, + * \p T may be compared for equality with \p InputIterator's \c value_type, + * and \p T is convertible to \p OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. + * + * \code + * #include + * #include + * ... + * thrust::device_vector A(4); + * A[0] = 1; + * A[1] = 2; + * A[2] = 3; + * A[3] = 1; + * + * thrust::device_vector B(4); + * + * thrust::replace_copy(A.begin(), A.end(), B.begin(), 1, 99); + * + * // B contains [99, 2, 3, 99] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/replace_copy + * \see \c copy + * \see \c replace + * \see \c replace_if + * \see \c replace_copy_if + */ +template + OutputIterator replace_copy(InputIterator first, InputIterator last, + OutputIterator result, const T &old_value, + const T &new_value); + + +/*! \p replace_copy_if copies elements from the range [first, last) to the range + * [result, result + (last-first)), except that any element for which \p pred + * is \c true is not copied; \p new_value is copied instead. + * + * More precisely, for every integer \c n such that 0 <= n < last-first, + * \p replace_copy_if performs the assignment *(result+n) = new_value if + * pred(*(first+n)), and *(result+n) = *(first+n) otherwise. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. 
+ * \param first The beginning of the sequence to copy from. + * \param last The end of the sequence to copy from. + * \param result The beginning of the sequence to copy to. + * \param pred The predicate to test on every value of the range [first,last). + * \param new_value The replacement value to assign pred(*i) evaluates to \c true. + * \return result + (last-first) + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * \tparam T is a model of Assignable, + * and \p T is convertible to \p OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. + * + * \code + * #include + * #include + * #include + * + * struct is_less_than_zero + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return x < 0; + * } + * }; + * + * ... + * + * thrust::device_vector A(4); + * A[0] = 1; + * A[1] = -3; + * A[2] = 2; + * A[3] = -1; + + * thrust::device_vector B(4); + * is_less_than_zero pred; + * + * thrust::replace_copy_if(thrust::device, A.begin(), A.end(), B.begin(), pred, 0); + * + * // B contains [1, 0, 2, 0] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/replace_copy + * \see \c replace + * \see \c replace_if + * \see \c replace_copy + */ +template +__host__ __device__ + OutputIterator replace_copy_if(const thrust::detail::execution_policy_base &exec, + InputIterator first, InputIterator last, + OutputIterator result, + Predicate pred, + const T &new_value); + + +/*! 
\p replace_copy_if copies elements from the range [first, last) to the range + * [result, result + (last-first)), except that any element for which \p pred + * is \c true is not copied; \p new_value is copied instead. + * + * More precisely, for every integer \c n such that 0 <= n < last-first, + * \p replace_copy_if performs the assignment *(result+n) = new_value if + * pred(*(first+n)), and *(result+n) = *(first+n) otherwise. + * + * \param first The beginning of the sequence to copy from. + * \param last The end of the sequence to copy from. + * \param result The beginning of the sequence to copy to. + * \param pred The predicate to test on every value of the range [first,last). + * \param new_value The replacement value to assign pred(*i) evaluates to \c true. + * \return result + (last-first) + * + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * \tparam T is a model of Assignable, + * and \p T is convertible to \p OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. + * + * \code + * #include + * #include + * + * struct is_less_than_zero + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return x < 0; + * } + * }; + * + * ... 
+ * + * thrust::device_vector A(4); + * A[0] = 1; + * A[1] = -3; + * A[2] = 2; + * A[3] = -1; + + * thrust::device_vector B(4); + * is_less_than_zero pred; + * + * thrust::replace_copy_if(A.begin(), A.end(), B.begin(), pred, 0); + * + * // B contains [1, 0, 2, 0] + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/replace_copy + * \see \c replace + * \see \c replace_if + * \see \c replace_copy + */ +template + OutputIterator replace_copy_if(InputIterator first, InputIterator last, + OutputIterator result, + Predicate pred, + const T &new_value); + + +/*! This version of \p replace_copy_if copies elements from the range [first, last) to the range + * [result, result + (last-first)), except that any element whose corresponding stencil + * element causes \p pred to be \c true is not copied; \p new_value is copied instead. + * + * More precisely, for every integer \c n such that 0 <= n < last-first, + * \p replace_copy_if performs the assignment *(result+n) = new_value if + * pred(*(stencil+n)), and *(result+n) = *(first+n) otherwise. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence to copy from. + * \param last The end of the sequence to copy from. + * \param stencil The beginning of the stencil sequence. + * \param result The beginning of the sequence to copy to. + * \param pred The predicate to test on every value of the range [stencil, stencil + (last - first)). + * \param new_value The replacement value to assign when pred(*s) evaluates to \c true. + * \return result + (last-first) + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator. + * \tparam InputIterator2 is a model of Input Iterator + * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. 
+ * \tparam OutputIterator is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * \tparam T is a model of Assignable, + * and \p T is convertible to \p OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. + * \pre \p stencil may equal \p result, but the ranges [stencil, stencil + (last - first)) and [result, result + (last - first)) shall not overlap otherwise. + * + * \code + * #include + * #include + * #include + * + * struct is_less_than_zero + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return x < 0; + * } + * }; + * + * ... + * + * thrust::device_vector A(4); + * A[0] = 10; + * A[1] = 20; + * A[2] = 30; + * A[3] = 40; + * + * thrust::device_vector S(4); + * S[0] = -1; + * S[1] = 0; + * S[2] = -1; + * S[3] = 0; + * + * thrust::device_vector B(4); + * is_less_than_zero pred; + * + * thrust::replace_if(thrust::device, A.begin(), A.end(), S.begin(), B.begin(), pred, 0); + * + * // B contains [0, 20, 0, 40] + * \endcode + * + * \see \c replace_copy + * \see \c replace_if + */ +template +__host__ __device__ + OutputIterator replace_copy_if(const thrust::detail::execution_policy_base &exec, + InputIterator1 first, InputIterator1 last, + InputIterator2 stencil, + OutputIterator result, + Predicate pred, + const T &new_value); + + +/*! This version of \p replace_copy_if copies elements from the range [first, last) to the range + * [result, result + (last-first)), except that any element whose corresponding stencil + * element causes \p pred to be \c true is not copied; \p new_value is copied instead. + * + * More precisely, for every integer \c n such that 0 <= n < last-first, + * \p replace_copy_if performs the assignment *(result+n) = new_value if + * pred(*(stencil+n)), and *(result+n) = *(first+n) otherwise. + * + * \param first The beginning of the sequence to copy from. 
+ * \param last The end of the sequence to copy from. + * \param stencil The beginning of the stencil sequence. + * \param result The beginning of the sequence to copy to. + * \param pred The predicate to test on every value of the range [stencil, stencil + (last - first)). + * \param new_value The replacement value to assign when pred(*s) evaluates to \c true. + * \return result + (last-first) + * + * \tparam InputIterator1 is a model of Input Iterator. + * \tparam InputIterator2 is a model of Input Iterator + * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam Predicate is a model of Predicate. + * \tparam T is a model of Assignable, + * and \p T is convertible to \p OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the ranges [first, last) and [result, result + (last - first)) shall not overlap otherwise. + * \pre \p stencil may equal \p result, but the ranges [stencil, stencil + (last - first)) and [result, result + (last - first)) shall not overlap otherwise. + * + * \code + * #include + * #include + * + * struct is_less_than_zero + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return x < 0; + * } + * }; + * + * ... + * + * thrust::device_vector A(4); + * A[0] = 10; + * A[1] = 20; + * A[2] = 30; + * A[3] = 40; + * + * thrust::device_vector S(4); + * S[0] = -1; + * S[1] = 0; + * S[2] = -1; + * S[3] = 0; + * + * thrust::device_vector B(4); + * is_less_than_zero pred; + * + * thrust::replace_if(A.begin(), A.end(), S.begin(), B.begin(), pred, 0); + * + * // B contains [0, 20, 0, 40] + * \endcode + * + * \see \c replace_copy + * \see \c replace_if + */ +template + OutputIterator replace_copy_if(InputIterator1 first, InputIterator1 last, + InputIterator2 stencil, + OutputIterator result, + Predicate pred, + const T &new_value); + + +/*! 
\} // end replacing + * \} // transformations + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/reverse.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/reverse.h new file mode 100644 index 0000000000000000000000000000000000000000..056be200adcaea6d0e5deed224a452b26d1fa8c1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/reverse.h @@ -0,0 +1,211 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file reverse.h + * \brief Reverses the order of a range + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup reordering + * \ingroup algorithms + */ + + +/*! \p reverse reverses a range. That is: for every i such that + * 0 <= i <= (last - first) / 2, it exchanges *(first + i) + * and *(last - (i + 1)). + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the range to reverse. + * \param last The end of the range to reverse. + * + * \tparam DerivedPolicy The name of the derived execution policy. 
+ * \tparam BidirectionalIterator is a model of Bidirectional Iterator and + * \p BidirectionalIterator is mutable. + * + * The following code snippet demonstrates how to use \p reverse to reverse a + * \p device_vector of integers using the \p thrust::device execution policy for + * parallelization: + * + * \code + * #include + * #include + * ... + * const int N = 6; + * int data[N] = {0, 1, 2, 3, 4, 5}; + * thrust::device_vector v(data, data + N); + * thrust::reverse(thrust::device, v.begin(), v.end()); + * // v is now {5, 4, 3, 2, 1, 0} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/reverse + * \see \p reverse_copy + * \see \p reverse_iterator + */ +template +__host__ __device__ + void reverse(const thrust::detail::execution_policy_base &exec, + BidirectionalIterator first, + BidirectionalIterator last); + + +/*! \p reverse reverses a range. That is: for every i such that + * 0 <= i <= (last - first) / 2, it exchanges *(first + i) + * and *(last - (i + 1)). + * + * \param first The beginning of the range to reverse. + * \param last The end of the range to reverse. + * + * \tparam BidirectionalIterator is a model of Bidirectional Iterator and + * \p BidirectionalIterator is mutable. + * + * The following code snippet demonstrates how to use \p reverse to reverse a + * \p device_vector of integers. + * + * \code + * #include + * ... + * const int N = 6; + * int data[N] = {0, 1, 2, 3, 4, 5}; + * thrust::device_vector v(data, data + N); + * thrust::reverse(v.begin(), v.end()); + * // v is now {5, 4, 3, 2, 1, 0} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/reverse + * \see \p reverse_copy + * \see \p reverse_iterator + */ +template + void reverse(BidirectionalIterator first, + BidirectionalIterator last); + + +/*! \p reverse_copy differs from \p reverse only in that the reversed range + * is written to a different output range, rather than inplace. 
+ * + * \p reverse_copy copies elements from the range [first, last) to the + * range [result, result + (last - first)) such that the copy is a + * reverse of the original range. Specifically: for every i such that + * 0 <= i < (last - first), \p reverse_copy performs the assignment + * *(result + (last - first) - i) = *(first + i). + * + * The return value is result + (last - first)). + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the range to reverse. + * \param last The end of the range to reverse. + * \param result The beginning of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam BidirectionalIterator is a model of Bidirectional Iterator, + * and \p BidirectionalIterator's \p value_type is convertible to \p OutputIterator's \p value_type. + * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The range [first, last) and the range [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p reverse_copy to reverse + * an input \p device_vector of integers to an output \p device_vector using the \p thrust::device + * execution policy for parallelization: + * + * \code + * #include + * #include + * ... 
+ * const int N = 6; + * int data[N] = {0, 1, 2, 3, 4, 5}; + * thrust::device_vector input(data, data + N); + * thrust::device_vector output(N); + * thrust::reverse_copy(thrust::device, v.begin(), v.end(), output.begin()); + * // input is still {0, 1, 2, 3, 4, 5} + * // output is now {5, 4, 3, 2, 1, 0} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/reverse_copy + * \see \p reverse + * \see \p reverse_iterator + */ +template +__host__ __device__ + OutputIterator reverse_copy(const thrust::detail::execution_policy_base &exec, + BidirectionalIterator first, + BidirectionalIterator last, + OutputIterator result); + + +/*! \p reverse_copy differs from \p reverse only in that the reversed range + * is written to a different output range, rather than inplace. + * + * \p reverse_copy copies elements from the range [first, last) to the + * range [result, result + (last - first)) such that the copy is a + * reverse of the original range. Specifically: for every i such that + * 0 <= i < (last - first), \p reverse_copy performs the assignment + * *(result + (last - first) - i) = *(first + i). + * + * The return value is result + (last - first)). + * + * \param first The beginning of the range to reverse. + * \param last The end of the range to reverse. + * \param result The beginning of the output range. + * + * \tparam BidirectionalIterator is a model of Bidirectional Iterator, + * and \p BidirectionalIterator's \p value_type is convertible to \p OutputIterator's \p value_type. + * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The range [first, last) and the range [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p reverse_copy to reverse + * an input \p device_vector of integers to an output \p device_vector. + * + * \code + * #include + * ... 
+ * const int N = 6; + * int data[N] = {0, 1, 2, 3, 4, 5}; + * thrust::device_vector input(data, data + N); + * thrust::device_vector output(N); + * thrust::reverse_copy(v.begin(), v.end(), output.begin()); + * // input is still {0, 1, 2, 3, 4, 5} + * // output is now {5, 4, 3, 2, 1, 0} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/reverse_copy + * \see \p reverse + * \see \p reverse_iterator + */ +template + OutputIterator reverse_copy(BidirectionalIterator first, + BidirectionalIterator last, + OutputIterator result); + + +/*! \} // end reordering + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/scan.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/scan.h new file mode 100644 index 0000000000000000000000000000000000000000..9b381422323f203b2995335089c0c8a5910cdbb2 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/scan.h @@ -0,0 +1,1656 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file scan.h + * \brief Functions for computing prefix sums + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup algorithms + */ + + +/*! 
\addtogroup prefixsums Prefix Sums + * \ingroup algorithms + * \{ + */ + + +/*! \p inclusive_scan computes an inclusive prefix sum operation. The + * term 'inclusive' means that each result includes the corresponding + * input operand in the partial sum. More precisely, *first is + * assigned to *result and the sum of *first and + * *(first + 1) is assigned to *(result + 1), and so on. + * This version of \p inclusive_scan assumes plus as the associative operator. + * When the input and output sequences are the same, the scan is performed + * in-place. + * + * \p inclusive_scan is similar to \c std::partial_sum in the STL. The primary + * difference between the two functions is that \c std::partial_sum guarantees + * a serial summation order, while \p inclusive_scan requires associativity of + * the binary operation to parallelize the prefix sum. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to + * \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's + * \c value_type, then x + y is defined. If \c T is + * \c OutputIterator's \c value_type, then T(0) is + * defined. 
+ * + * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p inclusive_scan to compute an in-place + * prefix sum using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * + * int data[6] = {1, 0, 2, 2, 1, 3}; + * + * thrust::inclusive_scan(thrust::host, data, data + 6, data); // in-place scan + * + * // data is now {1, 1, 3, 5, 6, 9} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partial_sum + * + */ +template +__host__ __device__ + OutputIterator inclusive_scan(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator result); + + +/*! \p inclusive_scan computes an inclusive prefix sum operation. The + * term 'inclusive' means that each result includes the corresponding + * input operand in the partial sum. More precisely, *first is + * assigned to *result and the sum of *first and + * *(first + 1) is assigned to *(result + 1), and so on. + * This version of \p inclusive_scan assumes plus as the associative operator. + * When the input and output sequences are the same, the scan is performed + * in-place. + * + * \p inclusive_scan is similar to \c std::partial_sum in the STL. The primary + * difference between the two functions is that \c std::partial_sum guarantees + * a serial summation order, while \p inclusive_scan requires associativity of + * the binary operation to parallelize the prefix sum. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \return The end of the output sequence. 
+ * + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to + * \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's + * \c value_type, then x + y is defined. If \c T is + * \c OutputIterator's \c value_type, then T(0) is + * defined. + * + * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p inclusive_scan + * + * \code + * #include + * + * int data[6] = {1, 0, 2, 2, 1, 3}; + * + * thrust::inclusive_scan(data, data + 6, data); // in-place scan + * + * // data is now {1, 1, 3, 5, 6, 9} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partial_sum + * + */ +template + OutputIterator inclusive_scan(InputIterator first, + InputIterator last, + OutputIterator result); + + +/*! \p inclusive_scan computes an inclusive prefix sum operation. The + * term 'inclusive' means that each result includes the corresponding + * input operand in the partial sum. When the input and output sequences + * are the same, the scan is performed in-place. + * + * \p inclusive_scan is similar to \c std::partial_sum in the STL. The primary + * difference between the two functions is that \c std::partial_sum guarantees + * a serial summation order, while \p inclusive_scan requires associativity of + * the binary operation to parallelize the prefix sum. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. 
+ * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param binary_op The associatve operator used to 'sum' values. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to + * \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator + * and \c OutputIterator's \c value_type is convertible to + * both \c AssociativeOperator's \c first_argument_type and + * \c second_argument_type. + * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is + * convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p inclusive_scan to compute an in-place + * prefix sum using the \p thrust::host execution policy for parallelization: + * + * \code + * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; + * + * thrust::maximum binary_op; + * + * thrust::inclusive_scan(thrust::host, data, data + 10, data, binary_op); // in-place scan + * + * // data is now {-5, 0, 2, 2, 2, 4, 4, 4, 4, 8} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partial_sum + */ +template +__host__ __device__ + OutputIterator inclusive_scan(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator result, + AssociativeOperator binary_op); + + +/*! \p inclusive_scan computes an inclusive prefix sum operation. The + * term 'inclusive' means that each result includes the corresponding + * input operand in the partial sum. When the input and output sequences + * are the same, the scan is performed in-place. 
+ * + * \p inclusive_scan is similar to \c std::partial_sum in the STL. The primary + * difference between the two functions is that \c std::partial_sum guarantees + * a serial summation order, while \p inclusive_scan requires associativity of + * the binary operation to parallelize the prefix sum. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param binary_op The associatve operator used to 'sum' values. + * \return The end of the output sequence. + * + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to + * \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator + * and \c OutputIterator's \c value_type is convertible to + * both \c AssociativeOperator's \c first_argument_type and + * \c second_argument_type. + * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is + * convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p inclusive_scan + * + * \code + * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; + * + * thrust::maximum binary_op; + * + * thrust::inclusive_scan(data, data + 10, data, binary_op); // in-place scan + * + * // data is now {-5, 0, 2, 2, 2, 4, 4, 4, 4, 8} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partial_sum + */ +template + OutputIterator inclusive_scan(InputIterator first, + InputIterator last, + OutputIterator result, + AssociativeOperator binary_op); + + +/*! 
\p exclusive_scan computes an exclusive prefix sum operation. The + * term 'exclusive' means that each result does not include the + * corresponding input operand in the partial sum. More precisely, + * 0 is assigned to *result and the sum of + * 0 and *first is assigned to *(result + 1), + * and so on. This version of \p exclusive_scan assumes plus as the + * associative operator and \c 0 as the initial value. When the input and + * output sequences are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to + * \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's + * \c value_type, then x + y is defined. If \c T is + * \c OutputIterator's \c value_type, then T(0) is + * defined. + * + * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan to compute an in-place + * prefix sum using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... 
+ * + * int data[6] = {1, 0, 2, 2, 1, 3}; + * + * thrust::exclusive_scan(thrust::host, data, data + 6, data); // in-place scan + * + * // data is now {0, 1, 1, 3, 5, 6} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partial_sum + */ +template +__host__ __device__ + OutputIterator exclusive_scan(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator result); + + +/*! \p exclusive_scan computes an exclusive prefix sum operation. The + * term 'exclusive' means that each result does not include the + * corresponding input operand in the partial sum. More precisely, + * 0 is assigned to *result and the sum of + * 0 and *first is assigned to *(result + 1), + * and so on. This version of \p exclusive_scan assumes plus as the + * associative operator and \c 0 as the initial value. When the input and + * output sequences are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \return The end of the output sequence. + * + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to + * \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's + * \c value_type, then x + y is defined. If \c T is + * \c OutputIterator's \c value_type, then T(0) is + * defined. + * + * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. 
+ * + * The following code snippet demonstrates how to use \p exclusive_scan + * + * \code + * #include + * + * int data[6] = {1, 0, 2, 2, 1, 3}; + * + * thrust::exclusive_scan(data, data + 6, data); // in-place scan + * + * // data is now {0, 1, 1, 3, 5, 6} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partial_sum + */ +template + OutputIterator exclusive_scan(InputIterator first, + InputIterator last, + OutputIterator result); + + +/*! \p exclusive_scan computes an exclusive prefix sum operation. The + * term 'exclusive' means that each result does not include the + * corresponding input operand in the partial sum. More precisely, + * \p init is assigned to *result and the sum of \p init and + * *first is assigned to *(result + 1), and so on. + * This version of \p exclusive_scan assumes plus as the associative + * operator but requires an initial value \p init. When the input and + * output sequences are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param init The initial value. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to + * \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's + * \c value_type, then x + y is defined. 
+ * \tparam T is convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan to compute an in-place + * prefix sum using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * + * int data[6] = {1, 0, 2, 2, 1, 3}; + * + * thrust::exclusive_scan(thrust::host, data, data + 6, data, 4); // in-place scan + * + * // data is now {4, 5, 5, 7, 9, 10} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partial_sum + */ +template +__host__ __device__ + OutputIterator exclusive_scan(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator result, + T init); + + +/*! \p exclusive_scan computes an exclusive prefix sum operation. The + * term 'exclusive' means that each result does not include the + * corresponding input operand in the partial sum. More precisely, + * \p init is assigned to *result and the sum of \p init and + * *first is assigned to *(result + 1), and so on. + * This version of \p exclusive_scan assumes plus as the associative + * operator but requires an initial value \p init. When the input and + * output sequences are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param init The initial value. + * \return The end of the output sequence. 
+ * + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to + * \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's + * \c value_type, then x + y is defined. + * \tparam T is convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan + * + * \code + * #include + * + * int data[6] = {1, 0, 2, 2, 1, 3}; + * + * thrust::exclusive_scan(data, data + 6, data, 4); // in-place scan + * + * // data is now {4, 5, 5, 7, 9, 10} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partial_sum + */ +template + OutputIterator exclusive_scan(InputIterator first, + InputIterator last, + OutputIterator result, + T init); + + +/*! \p exclusive_scan computes an exclusive prefix sum operation. The + * term 'exclusive' means that each result does not include the + * corresponding input operand in the partial sum. More precisely, + * \p init is assigned to \*result and the value + * binary_op(init, \*first) is assigned to \*(result + 1), + * and so on. This version of the function requires both an associative + * operator and an initial value \p init. When the input and output + * sequences are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. 
+ * \param result The beginning of the output sequence. + * \param init The initial value. + * \param binary_op The associatve operator used to 'sum' values. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to + * \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator + * and \c OutputIterator's \c value_type is convertible to + * both \c AssociativeOperator's \c first_argument_type and + * \c second_argument_type. + * \tparam T is convertible to \c OutputIterator's \c value_type. + * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is + * convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan to compute an in-place + * prefix sum using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * + * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; + * + * thrust::maximum binary_op; + * + * thrust::exclusive_scan(thrust::host, data, data + 10, data, 1, binary_op); // in-place scan + * + * // data is now {1, 1, 1, 2, 2, 2, 4, 4, 4, 4 } + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partial_sum + */ +template +__host__ __device__ + OutputIterator exclusive_scan(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator result, + T init, + AssociativeOperator binary_op); + + +/*! \p exclusive_scan computes an exclusive prefix sum operation. 
The + * term 'exclusive' means that each result does not include the + * corresponding input operand in the partial sum. More precisely, + * \p init is assigned to \*result and the value + * binary_op(init, \*first) is assigned to \*(result + 1), + * and so on. This version of the function requires both an associative + * operator and an initial value \p init. When the input and output + * sequences are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param init The initial value. + * \param binary_op The associatve operator used to 'sum' values. + * \return The end of the output sequence. + * + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to + * \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator + * and \c OutputIterator's \c value_type is convertible to + * both \c AssociativeOperator's \c first_argument_type and + * \c second_argument_type. + * \tparam T is convertible to \c OutputIterator's \c value_type. + * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is + * convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. 
+ * + * The following code snippet demonstrates how to use \p exclusive_scan + * + * \code + * #include + * #include + * + * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; + * + * thrust::maximum binary_op; + * + * thrust::exclusive_scan(data, data + 10, data, 1, binary_op); // in-place scan + * + * // data is now {1, 1, 1, 2, 2, 2, 4, 4, 4, 4 } + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/partial_sum + */ +template + OutputIterator exclusive_scan(InputIterator first, + InputIterator last, + OutputIterator result, + T init, + AssociativeOperator binary_op); + + +/*! \addtogroup segmentedprefixsums Segmented Prefix Sums + * \ingroup prefixsums + * \{ + */ + + +/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix + * sum operation. The term 'inclusive' means that each result includes + * the corresponding input operand in the partial sum. The term 'segmented' + * means that the partial sums are broken into distinct segments. In other + * words, within each segment a separate inclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p inclusive_scan_by_key assumes \c equal_to as the binary + * predicate used to compare adjacent keys. Specifically, consecutive iterators + * i and i+1 in the range [first1, last1) + * belong to the same segment if *i == *(i+1), and belong to + * different segments otherwise. + * + * This version of \p inclusive_scan_by_key assumes \c plus as the associative + * operator used to perform the prefix sum. When the input and output sequences + * are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. 
+ * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then + * binary_op(x,y) is defined. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p inclusive_scan_by_key using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * + * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * + * thrust::inclusive_scan_by_key(thrust::host, keys, keys + 10, data, data); // in-place scan + * + * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; + * \endcode + * + * \see inclusive_scan + * \see exclusive_scan_by_key + * + */ +template +__host__ __device__ + OutputIterator inclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result); + + +/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix + * sum operation. 
The term 'inclusive' means that each result includes + * the corresponding input operand in the partial sum. The term 'segmented' + * means that the partial sums are broken into distinct segments. In other + * words, within each segment a separate inclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p inclusive_scan_by_key assumes \c equal_to as the binary + * predicate used to compare adjacent keys. Specifically, consecutive iterators + * i and i+1 in the range [first1, last1) + * belong to the same segment if *i == *(i+1), and belong to + * different segments otherwise. + * + * This version of \p inclusive_scan_by_key assumes \c plus as the associative + * operator used to perform the prefix sum. When the input and output sequences + * are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * \return The end of the output sequence. + * + * \tparam InputIterator1 is a model of Input Iterator + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then + * binary_op(x,y) is defined. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. 
+ * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p inclusive_scan_by_key + * + * \code + * #include + * + * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * + * thrust::inclusive_scan_by_key(keys, keys + 10, data, data); // in-place scan + * + * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; + * \endcode + * + * \see inclusive_scan + * \see exclusive_scan_by_key + * + */ +template + OutputIterator inclusive_scan_by_key(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result); + + +/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix + * sum operation. The term 'inclusive' means that each result includes + * the corresponding input operand in the partial sum. The term 'segmented' + * means that the partial sums are broken into distinct segments. In other + * words, within each segment a separate inclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p inclusive_scan_by_key uses the binary predicate + * \c pred to compare adjacent keys. Specifically, consecutive iterators + * i and i+1 in the range [first1, last1) + * belong to the same segment if binary_pred(*i, *(i+1)) is true, and belong to + * different segments otherwise. + * + * This version of \p inclusive_scan_by_key assumes \c plus as the associative + * operator used to perform the prefix sum. When the input and output sequences + * are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * The algorithm's execution is parallelized as determined by \p exec. 
+ * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * \param binary_pred The binary predicate used to determine equality of keys. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then + * binary_op(x,y) is defined. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p inclusive_scan_by_key using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * + * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * + * thrust::equal_to binary_pred; + * + * thrust::inclusive_scan_by_key(thrust::host, keys, keys + 10, data, data, binary_pred); // in-place scan + * + * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; + * \endcode + * + * \see inclusive_scan + * \see exclusive_scan_by_key + * + */ +template +__host__ __device__ + OutputIterator inclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + BinaryPredicate binary_pred); + + +/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix + * sum operation. The term 'inclusive' means that each result includes + * the corresponding input operand in the partial sum. The term 'segmented' + * means that the partial sums are broken into distinct segments. In other + * words, within each segment a separate inclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p inclusive_scan_by_key uses the binary predicate + * \c pred to compare adjacent keys. Specifically, consecutive iterators + * i and i+1 in the range [first1, last1) + * belong to the same segment if binary_pred(*i, *(i+1)) is true, and belong to + * different segments otherwise. + * + * This version of \p inclusive_scan_by_key assumes \c plus as the associative + * operator used to perform the prefix sum. When the input and output sequences + * are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. 
+ * \param result The beginning of the output value sequence. + * \param binary_pred The binary predicate used to determine equality of keys. + * \return The end of the output sequence. + * + * \tparam InputIterator1 is a model of Input Iterator + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then + * binary_op(x,y) is defined. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p inclusive_scan_by_key + * + * \code + * #include + * #include + * + * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * + * thrust::equal_to binary_pred; + * + * thrust::inclusive_scan_by_key(keys, keys + 10, data, data, binary_pred); // in-place scan + * + * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; + * \endcode + * + * \see inclusive_scan + * \see exclusive_scan_by_key + * + */ +template + OutputIterator inclusive_scan_by_key(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + BinaryPredicate binary_pred); + + +/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix + * sum operation. The term 'inclusive' means that each result includes + * the corresponding input operand in the partial sum. The term 'segmented' + * means that the partial sums are broken into distinct segments. 
In other + * words, within each segment a separate inclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p inclusive_scan_by_key uses the binary predicate + * \c pred to compare adjacent keys. Specifically, consecutive iterators + * i and i+1 in the range [first1, last1) + * belong to the same segment if binary_pred(*i, *(i+1)) is true, and belong to + * different segments otherwise. + * + * This version of \p inclusive_scan_by_key uses the associative operator + * \c binary_op to perform the prefix sum. When the input and output sequences + * are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * \param binary_pred The binary predicate used to determine equality of keys. + * \param binary_op The associatve operator used to 'sum' values. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then + * binary_op(x,y) is defined. + * \tparam BinaryPredicate is a model of Binary Predicate. 
+ * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is + * convertible to \c OutputIterator's \c value_type. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p inclusive_scan_by_key using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * + * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * + * thrust::equal_to binary_pred; + * thrust::plus binary_op; + * + * thrust::inclusive_scan_by_key(thrust::host, keys, keys + 10, data, data, binary_pred, binary_op); // in-place scan + * + * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; + * \endcode + * + * \see inclusive_scan + * \see exclusive_scan_by_key + * + */ +template +__host__ __device__ + OutputIterator inclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + BinaryPredicate binary_pred, + AssociativeOperator binary_op); + + +/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix + * sum operation. The term 'inclusive' means that each result includes + * the corresponding input operand in the partial sum. The term 'segmented' + * means that the partial sums are broken into distinct segments. In other + * words, within each segment a separate inclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p inclusive_scan_by_key uses the binary predicate + * \c pred to compare adjacent keys. 
Specifically, consecutive iterators + * i and i+1 in the range [first1, last1) + * belong to the same segment if binary_pred(*i, *(i+1)) is true, and belong to + * different segments otherwise. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * This version of \p inclusive_scan_by_key uses the associative operator + * \c binary_op to perform the prefix sum. When the input and output sequences + * are the same, the scan is performed in-place. + * + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * \param binary_pred The binary predicate used to determine equality of keys. + * \param binary_op The associatve operator used to 'sum' values. + * \return The end of the output sequence. + * + * \tparam InputIterator1 is a model of Input Iterator + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then + * binary_op(x,y) is defined. + * \tparam BinaryPredicate is a model of Binary Predicate. + * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is + * convertible to \c OutputIterator's \c value_type. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. 
+ * + * The following code snippet demonstrates how to use \p inclusive_scan_by_key + * + * \code + * #include + * #include + * + * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * + * thrust::equal_to binary_pred; + * thrust::plus binary_op; + * + * thrust::inclusive_scan_by_key(keys, keys + 10, data, data, binary_pred, binary_op); // in-place scan + * + * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; + * \endcode + * + * \see inclusive_scan + * \see exclusive_scan_by_key + * + */ +template + OutputIterator inclusive_scan_by_key(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + BinaryPredicate binary_pred, + AssociativeOperator binary_op); + + +/*! \p exclusive_scan_by_key computes an exclusive segmented prefix + * + * This version of \p exclusive_scan_by_key uses the value \c 0 to + * initialize the exclusive scan operation. + * + * This version of \p exclusive_scan_by_key assumes \c plus as the associative + * operator used to perform the prefix sum. When the input and output sequences + * are the same, the scan is performed in-place. + * + * This version of \p exclusive_scan_by_key assumes \c equal_to as the binary + * predicate used to compare adjacent keys. Specifically, consecutive iterators + * i and i+1 in the range [first1, last1 + * belong to the same segment if *i == *(i+1), and belong to + * different segments otherwise. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * Refer to the most general form of \p exclusive_scan_by_key for additional details. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. 
+ * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan_by_key using the + * \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * + * thrust::exclusive_scan_by_key(thrust::host, key, key + 10, vals, vals); // in-place scan + * + * // vals is now {0, 1, 2, 0, 1, 0, 0, 1, 2, 3}; + * \endcode + * + * \see exclusive_scan + * + */ +template +__host__ __device__ + OutputIterator exclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result); + + +/*! \p exclusive_scan_by_key computes an exclusive segmented prefix + * + * This version of \p exclusive_scan_by_key uses the value \c 0 to + * initialize the exclusive scan operation. + * + * This version of \p exclusive_scan_by_key assumes \c plus as the associative + * operator used to perform the prefix sum. When the input and output sequences + * are the same, the scan is performed in-place. + * + * This version of \p exclusive_scan_by_key assumes \c equal_to as the binary + * predicate used to compare adjacent keys. Specifically, consecutive iterators + * i and i+1 in the range [first1, last1 + * belong to the same segment if *i == *(i+1), and belong to + * different segments otherwise. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). 
Results for pseudo-associative + * operators may vary from run to run. + * + * Refer to the most general form of \p exclusive_scan_by_key for additional details. + * + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan_by_key. + * + * \code + * #include + * + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * + * thrust::exclusive_scan_by_key(key, key + 10, vals, vals); // in-place scan + * + * // vals is now {0, 1, 2, 0, 1, 0, 0, 1, 2, 3}; + * \endcode + * + * \see exclusive_scan + * + */ +template + OutputIterator exclusive_scan_by_key(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result); + + +/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix + * sum operation. The term 'exclusive' means that each result does not include + * the corresponding input operand in the partial sum. The term 'segmented' + * means that the partial sums are broken into distinct segments. In other + * words, within each segment a separate exclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p exclusive_scan_by_key uses the value \c init to + * initialize the exclusive scan operation. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). 
Results for pseudo-associative + * operators may vary from run to run. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * \param init The initial of the exclusive sum value. + * \return The end of the output sequence. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan_by_key using the \p + * thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * + * int init = 5; + * + * thrust::exclusive_scan_by_key(thrust::host, key, key + 10, vals, vals, init); // in-place scan + * + * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8}; + * \endcode + * + * \see exclusive_scan + * \see inclusive_scan_by_key + * + */ +template +__host__ __device__ + OutputIterator exclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + T init); + + +/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix + * sum operation. The term 'exclusive' means that each result does not include + * the corresponding input operand in the partial sum. The term 'segmented' + * means that the partial sums are broken into distinct segments. 
In other + * words, within each segment a separate exclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p exclusive_scan_by_key uses the value \c init to + * initialize the exclusive scan operation. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * \param init The initial of the exclusive sum value. + * \return The end of the output sequence. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan_by_key + * + * \code + * #include + * #include + * + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * + * int init = 5; + * + * thrust::exclusive_scan_by_key(key, key + 10, vals, vals, init); // in-place scan + * + * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8}; + * \endcode + * + * \see exclusive_scan + * \see inclusive_scan_by_key + * + */ +template + OutputIterator exclusive_scan_by_key(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + T init); + + +/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix + * sum operation. The term 'exclusive' means that each result does not include + * the corresponding input operand in the partial sum. 
The term 'segmented' + * means that the partial sums are broken into distinct segments. In other + * words, within each segment a separate exclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p exclusive_scan_by_key uses the value \c init to + * initialize the exclusive scan operation. + * + * This version of \p exclusive_scan_by_key uses the binary predicate \c binary_pred + * to compare adjacent keys. Specifically, consecutive iterators i and + * i+1 in the range [first1, last1) belong to the same segment if + * binary_pred(*i, *(i+1)) is true, and belong to different segments otherwise. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * \param init The initial of the exclusive sum value. + * \param binary_pred The binary predicate used to determine equality of keys. + * \return The end of the output sequence. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan_by_key using the + * \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * + * int init = 5; + * + * thrust::equal_to binary_pred; + * + * thrust::exclusive_scan_by_key(thrust::host, key, key + 10, vals, vals, init, binary_pred); // in-place scan + * + * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8}; + * \endcode + * + * \see exclusive_scan + * \see inclusive_scan_by_key + * + */ +template +__host__ __device__ + OutputIterator exclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + T init, + BinaryPredicate binary_pred); + + +/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix + * sum operation. The term 'exclusive' means that each result does not include + * the corresponding input operand in the partial sum. The term 'segmented' + * means that the partial sums are broken into distinct segments. In other + * words, within each segment a separate exclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p exclusive_scan_by_key uses the value \c init to + * initialize the exclusive scan operation. + * + * This version of \p exclusive_scan_by_key uses the binary predicate \c binary_pred + * to compare adjacent keys. Specifically, consecutive iterators i and + * i+1 in the range [first1, last1) belong to the same segment if + * binary_pred(*i, *(i+1)) is true, and belong to different segments otherwise. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. 
+ * \param init The initial of the exclusive sum value. + * \param binary_pred The binary predicate used to determine equality of keys. + * \return The end of the output sequence. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan_by_key + * + * \code + * #include + * #include + * + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * + * int init = 5; + * + * thrust::equal_to binary_pred; + * + * thrust::exclusive_scan_by_key(key, key + 10, vals, vals, init, binary_pred); // in-place scan + * + * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8}; + * \endcode + * + * \see exclusive_scan + * \see inclusive_scan_by_key + * + */ +template + OutputIterator exclusive_scan_by_key(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + T init, + BinaryPredicate binary_pred); + + +/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix + * sum operation. The term 'exclusive' means that each result does not include + * the corresponding input operand in the partial sum. The term 'segmented' + * means that the partial sums are broken into distinct segments. In other + * words, within each segment a separate exclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p exclusive_scan_by_key uses the value \c init to + * initialize the exclusive scan operation. + * + * This version of \p exclusive_scan_by_key uses the binary predicate \c binary_pred + * to compare adjacent keys. 
Specifically, consecutive iterators i and + * i+1 in the range [first1, last1) belong to the same segment if + * binary_pred(*i, *(i+1)) is true, and belong to different segments otherwise. + * + * This version of \p exclusive_scan_by_key uses the associative operator + * \c binary_op to perform the prefix sum. When the input and output sequences + * are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * \param init The initial of the exclusive sum value. + * \param binary_pred The binary predicate used to determine equality of keys. + * \param binary_op The associatve operator used to 'sum' values. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then + * binary_op(x,y) is defined. + * \tparam T is convertible to \c OutputIterator's \c value_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is convertible to \c OutputIterator's \c value_type. 
+ * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. + * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan_by_key using the + * \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * + * int init = 5; + * + * thrust::equal_to binary_pred; + * thrust::plus binary_op; + * + * thrust::exclusive_scan_by_key(thrust::host, key, key + 10, vals, vals, init, binary_pred, binary_op); // in-place scan + * + * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8}; + * \endcode + * + * \see exclusive_scan + * \see inclusive_scan_by_key + * + */ +template +__host__ __device__ + OutputIterator exclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + T init, + BinaryPredicate binary_pred, + AssociativeOperator binary_op); + + +/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix + * sum operation. The term 'exclusive' means that each result does not include + * the corresponding input operand in the partial sum. The term 'segmented' + * means that the partial sums are broken into distinct segments. In other + * words, within each segment a separate exclusive scan operation is computed. + * Refer to the code sample below for example usage. + * + * This version of \p exclusive_scan_by_key uses the value \c init to + * initialize the exclusive scan operation. + * + * This version of \p exclusive_scan_by_key uses the binary predicate \c binary_pred + * to compare adjacent keys. 
Specifically, consecutive iterators i and + * i+1 in the range [first1, last1) belong to the same segment if + * binary_pred(*i, *(i+1)) is true, and belong to different segments otherwise. + * + * This version of \p exclusive_scan_by_key uses the associative operator + * \c binary_op to perform the prefix sum. When the input and output sequences + * are the same, the scan is performed in-place. + * + * Results are not deterministic for pseudo-associative operators (e.g., + * addition of floating-point types). Results for pseudo-associative + * operators may vary from run to run. + * + * \param first1 The beginning of the key sequence. + * \param last1 The end of the key sequence. + * \param first2 The beginning of the input value sequence. + * \param result The beginning of the output value sequence. + * \param init The initial of the exclusive sum value. + * \param binary_pred The binary predicate used to determine equality of keys. + * \param binary_op The associatve operator used to 'sum' values. + * \return The end of the output sequence. + * + * \tparam InputIterator1 is a model of Input Iterator + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. + * \tparam OutputIterator is a model of Output Iterator, + * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then + * binary_op(x,y) is defined. + * \tparam T is convertible to \c OutputIterator's \c value_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is convertible to \c OutputIterator's \c value_type. + * + * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. 
+ * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p exclusive_scan_by_key + * + * \code + * #include + * #include + * + * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; + * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + * + * int init = 5; + * + * thrust::equal_to binary_pred; + * thrust::plus binary_op; + * + * thrust::exclusive_scan_by_key(key, key + 10, vals, vals, init, binary_pred, binary_op); // in-place scan + * + * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8}; + * \endcode + * + * \see exclusive_scan + * \see inclusive_scan_by_key + * + */ +template + OutputIterator exclusive_scan_by_key(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + T init, + BinaryPredicate binary_pred, + AssociativeOperator binary_op); + + +/*! \} // end segmentedprefixsums + */ + + +/*! \} // end prefix sums + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/scatter.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/scatter.h new file mode 100644 index 0000000000000000000000000000000000000000..b8b0bd84f52a511a7b58eedbeaa1d34873ee0893 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/scatter.h @@ -0,0 +1,419 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file scatter.h + * \brief Irregular copying to a destination range + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup scattering + * \ingroup copying + * \{ + */ + + +/*! \p scatter copies elements from a source range into an output array + * according to a map. For each iterator \c i in the range [\p first, \p last), + * the value \c *i is assigned to output[*(map + (i - first))]. The + * output iterator must permit random access. If the same index + * appears more than once in the range [map, map + (last - first)), + * the result is undefined. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first Beginning of the sequence of values to scatter. + * \param last End of the sequence of values to scatter. + * \param map Beginning of the sequence of output indices. + * \param result Destination of the source elements. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 must be a model of Input Iterator and \c InputIterator1's \c value_type must be convertible to \c RandomAccessIterator's \c value_type. + * \tparam InputIterator2 must be a model of Input Iterator and \c InputIterator2's \c value_type must be convertible to \c RandomAccessIterator's \c difference_type. + * \tparam RandomAccessIterator must be a model of Random Access iterator. 
+ * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[first,last)` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[map,map + (last - first))` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The expression `result[*i]` shall be valid for all iterators in the range `[map,map + (last - first))`. + * + * The following code snippet demonstrates how to use \p scatter to + * reorder a range using the \p thrust::device execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * // mark even indices with a 1; odd indices with a 0 + * int values[10] = {1, 0, 1, 0, 1, 0, 1, 0, 1, 0}; + * thrust::device_vector d_values(values, values + 10); + * + * // scatter all even indices into the first half of the + * // range, and odd indices vice versa + * int map[10] = {0, 5, 1, 6, 2, 7, 3, 8, 4, 9}; + * thrust::device_vector d_map(map, map + 10); + * + * thrust::device_vector d_output(10); + * thrust::scatter(thrust::device, + * d_values.begin(), d_values.end(), + * d_map.begin(), d_output.begin()); + * // d_output is now {1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + * \endcode + * + * \note \p scatter is the inverse of thrust::gather. + */ +template +__host__ __device__ + void scatter(const thrust::detail::execution_policy_base &exec, + InputIterator1 first, + InputIterator1 last, + InputIterator2 map, + RandomAccessIterator result); + + +/*! \p scatter copies elements from a source range into an output array + * according to a map. For each iterator \c i in the range [\p first, \p last), + * the value \c *i is assigned to output[*(map + (i - first))]. The + * output iterator must permit random access. If the same index + * appears more than once in the range [map, map + (last - first)), + * the result is undefined. 
+ * + * \param first Beginning of the sequence of values to scatter. + * \param last End of the sequence of values to scatter. + * \param map Beginning of the sequence of output indices. + * \param result Destination of the source elements. + * + * \tparam InputIterator1 must be a model of Input Iterator and \c InputIterator1's \c value_type must be convertible to \c RandomAccessIterator's \c value_type. + * \tparam InputIterator2 must be a model of Input Iterator and \c InputIterator2's \c value_type must be convertible to \c RandomAccessIterator's \c difference_type. + * \tparam RandomAccessIterator must be a model of Random Access iterator. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[first,last)` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[map,map + (last - first))` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The expression `result[*i]` shall be valid for all iterators in the range `[map,map + (last - first))`. + * + * The following code snippet demonstrates how to use \p scatter to + * reorder a range. + * + * \code + * #include + * #include + * ... + * // mark even indices with a 1; odd indices with a 0 + * int values[10] = {1, 0, 1, 0, 1, 0, 1, 0, 1, 0}; + * thrust::device_vector d_values(values, values + 10); + * + * // scatter all even indices into the first half of the + * // range, and odd indices vice versa + * int map[10] = {0, 5, 1, 6, 2, 7, 3, 8, 4, 9}; + * thrust::device_vector d_map(map, map + 10); + * + * thrust::device_vector d_output(10); + * thrust::scatter(d_values.begin(), d_values.end(), + * d_map.begin(), d_output.begin()); + * // d_output is now {1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + * \endcode + * + * \note \p scatter is the inverse of thrust::gather. 
+ */ +template + void scatter(InputIterator1 first, + InputIterator1 last, + InputIterator2 map, + RandomAccessIterator result); + + +/*! \p scatter_if conditionally copies elements from a source range into an + * output array according to a map. For each iterator \c i in the + * range [first, last) such that *(stencil + (i - first)) is + * true, the value \c *i is assigned to output[*(map + (i - first))]. + * The output iterator must permit random access. If the same index + * appears more than once in the range [map, map + (last - first)) + * the result is undefined. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first Beginning of the sequence of values to scatter. + * \param last End of the sequence of values to scatter. + * \param map Beginning of the sequence of output indices. + * \param stencil Beginning of the sequence of predicate values. + * \param output Beginning of the destination range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 must be a model of Input Iterator and \c InputIterator1's \c value_type must be convertible to \c RandomAccessIterator's \c value_type. + * \tparam InputIterator2 must be a model of Input Iterator and \c InputIterator2's \c value_type must be convertible to \c RandomAccessIterator's \c difference_type. + * \tparam InputIterator3 must be a model of Input Iterator and \c InputIterator3's \c value_type must be convertible to \c bool. + * \tparam RandomAccessIterator must be a model of Random Access iterator. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[first,last)` for all iterators `i` in the range `[map,map + (last - first))`. 
+ * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[map,map + (last - first))` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[stencil,stencil + (last - first))` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The expression `result[*i]` shall be valid for all iterators `i` in the range `[map,map + (last - first))` for which the following condition holds: `*(stencil + i) != false`. + * + * \code + * #include + * #include + * ... + * int V[8] = {10, 20, 30, 40, 50, 60, 70, 80}; + * int M[8] = {0, 5, 1, 6, 2, 7, 3, 4}; + * int S[8] = {1, 0, 1, 0, 1, 0, 1, 0}; + * int D[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + * + * thrust::scatter_if(thrust::host, V, V + 8, M, S, D); + * + * // D contains [10, 30, 50, 70, 0, 0, 0, 0]; + * \endcode + * + * \note \p scatter_if is the inverse of thrust::gather_if. + */ +template +__host__ __device__ + void scatter_if(const thrust::detail::execution_policy_base &exec, + InputIterator1 first, + InputIterator1 last, + InputIterator2 map, + InputIterator3 stencil, + RandomAccessIterator output); + + +/*! \p scatter_if conditionally copies elements from a source range into an + * output array according to a map. For each iterator \c i in the + * range [first, last) such that *(stencil + (i - first)) is + * true, the value \c *i is assigned to output[*(map + (i - first))]. + * The output iterator must permit random access. If the same index + * appears more than once in the range [map, map + (last - first)) + * the result is undefined. + * + * \param first Beginning of the sequence of values to scatter. + * \param last End of the sequence of values to scatter. + * \param map Beginning of the sequence of output indices. + * \param stencil Beginning of the sequence of predicate values. 
+ * \param output Beginning of the destination range. + * + * \tparam InputIterator1 must be a model of Input Iterator and \c InputIterator1's \c value_type must be convertible to \c RandomAccessIterator's \c value_type. + * \tparam InputIterator2 must be a model of Input Iterator and \c InputIterator2's \c value_type must be convertible to \c RandomAccessIterator's \c difference_type. + * \tparam InputIterator3 must be a model of Input Iterator and \c InputIterator3's \c value_type must be convertible to \c bool. + * \tparam RandomAccessIterator must be a model of Random Access iterator. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[first,last)` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[map,map + (last - first))` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[stencil,stencil + (last - first))` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The expression `result[*i]` shall be valid for all iterators `i` in the range `[map,map + (last - first))` for which the following condition holds: `*(stencil + i) != false`. + * + * \code + * #include + * ... + * int V[8] = {10, 20, 30, 40, 50, 60, 70, 80}; + * int M[8] = {0, 5, 1, 6, 2, 7, 3, 4}; + * int S[8] = {1, 0, 1, 0, 1, 0, 1, 0}; + * int D[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + * + * thrust::scatter_if(V, V + 8, M, S, D); + * + * // D contains [10, 30, 50, 70, 0, 0, 0, 0]; + * \endcode + * + * \note \p scatter_if is the inverse of thrust::gather_if. + */ +template + void scatter_if(InputIterator1 first, + InputIterator1 last, + InputIterator2 map, + InputIterator3 stencil, + RandomAccessIterator output); + + +/*! 
\p scatter_if conditionally copies elements from a source range into an + * output array according to a map. For each iterator \c i in the + * range [first, last) such that pred(*(stencil + (i - first))) is + * \c true, the value \c *i is assigned to output[*(map + (i - first))]. + * The output iterator must permit random access. If the same index + * appears more than once in the range [map, map + (last - first)) + * the result is undefined. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first Beginning of the sequence of values to scatter. + * \param last End of the sequence of values to scatter. + * \param map Beginning of the sequence of output indices. + * \param stencil Beginning of the sequence of predicate values. + * \param output Beginning of the destination range. + * \param pred Predicate to apply to the stencil values. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 must be a model of Input Iterator and \c InputIterator1's \c value_type must be convertible to \c RandomAccessIterator's \c value_type. + * \tparam InputIterator2 must be a model of Input Iterator and \c InputIterator2's \c value_type must be convertible to \c RandomAccessIterator's \c difference_type. + * \tparam InputIterator3 must be a model of Input Iterator and \c InputIterator3's \c value_type must be convertible to \c Predicate's \c argument_type. + * \tparam RandomAccessIterator must be a model of Random Access iterator. + * \tparam Predicate must be a model of Predicate. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[first,last)` for all iterators `i` in the range `[map,map + (last - first))`. 
+ * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[map,map + (last - first))` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[stencil,stencil + (last - first))` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The expression `result[*i]` shall be valid for all iterators `i` in the range `[map,map + (last - first))` for which the following condition holds: `pred(*(stencil + i)) != false`. + * + * \code + * #include + * #include + * + * struct is_even + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return (x % 2) == 0; + * } + * }; + * + * ... + * + * int V[8] = {10, 20, 30, 40, 50, 60, 70, 80}; + * int M[8] = {0, 5, 1, 6, 2, 7, 3, 4}; + * int S[8] = {2, 1, 2, 1, 2, 1, 2, 1}; + * int D[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + * + * is_even pred; + * thrust::scatter_if(thrust::host, V, V + 8, M, S, D, pred); + * + * // D contains [10, 30, 50, 70, 0, 0, 0, 0]; + * \endcode + * + * \note \p scatter_if is the inverse of thrust::gather_if. + */ +template +__host__ __device__ + void scatter_if(const thrust::detail::execution_policy_base &exec, + InputIterator1 first, + InputIterator1 last, + InputIterator2 map, + InputIterator3 stencil, + RandomAccessIterator output, + Predicate pred); + + +/*! \p scatter_if conditionally copies elements from a source range into an + * output array according to a map. For each iterator \c i in the + * range [first, last) such that pred(*(stencil + (i - first))) is + * \c true, the value \c *i is assigned to output[*(map + (i - first))]. + * The output iterator must permit random access. If the same index + * appears more than once in the range [map, map + (last - first)) + * the result is undefined. + * + * \param first Beginning of the sequence of values to scatter. 
+ * \param last End of the sequence of values to scatter. + * \param map Beginning of the sequence of output indices. + * \param stencil Beginning of the sequence of predicate values. + * \param output Beginning of the destination range. + * \param pred Predicate to apply to the stencil values. + * + * \tparam InputIterator1 must be a model of Input Iterator and \c InputIterator1's \c value_type must be convertible to \c RandomAccessIterator's \c value_type. + * \tparam InputIterator2 must be a model of Input Iterator and \c InputIterator2's \c value_type must be convertible to \c RandomAccessIterator's \c difference_type. + * \tparam InputIterator3 must be a model of Input Iterator and \c InputIterator3's \c value_type must be convertible to \c Predicate's \c argument_type. + * \tparam RandomAccessIterator must be a model of Random Access iterator. + * \tparam Predicate must be a model of Predicate. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[first,last)` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[map,map + (last - first))` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The iterator `result + i` shall not refer to any element referenced by any iterator `j` in the range `[stencil,stencil + (last - first))` for all iterators `i` in the range `[map,map + (last - first))`. + * + * \pre The expression `result[*i]` shall be valid for all iterators `i` in the range `[map,map + (last - first))` for which the following condition holds: `pred(*(stencil + i)) != false`. + * + * \code + * #include + * + * struct is_even + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return (x % 2) == 0; + * } + * }; + * + * ... 
+ * + * int V[8] = {10, 20, 30, 40, 50, 60, 70, 80}; + * int M[8] = {0, 5, 1, 6, 2, 7, 3, 4}; + * int S[8] = {2, 1, 2, 1, 2, 1, 2, 1}; + * int D[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + * + * is_even pred; + * thrust::scatter_if(V, V + 8, M, S, D, pred); + * + * // D contains [10, 30, 50, 70, 0, 0, 0, 0]; + * \endcode + * + * \note \p scatter_if is the inverse of thrust::gather_if. + */ +template + void scatter_if(InputIterator1 first, + InputIterator1 last, + InputIterator2 map, + InputIterator3 stencil, + RandomAccessIterator output, + Predicate pred); + + +/*! \} // end scattering + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/set_operations.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/set_operations.h new file mode 100644 index 0000000000000000000000000000000000000000..65a48d1b66129d7f0c2884b399e82d196786962f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/set_operations.h @@ -0,0 +1,2959 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file set_operations.h + * \brief Set theoretic operations for sorted ranges + */ + +#pragma once + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! 
\addtogroup set_operations Set Operations + * \ingroup algorithms + * \{ + */ + + +/*! \p set_difference constructs a sorted range that is the set difference of the sorted + * ranges [first1, last1) and [first2, last2). The return value is the + * end of the output range. + * + * In the simplest case, \p set_difference performs the "difference" operation from set + * theory: the output range contains a copy of every element that is contained in + * [first1, last1) and not contained in [first2, last1). The general case + * is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [first1, last1) contains \c m elements + * that are equivalent to each other and if [first2, last2) contains \c n + * elements that are equivalent to them, the last max(m-n,0) elements from + * [first1, last1) range shall be copied to the output range. + * + * This version of \p set_difference compares elements using \c operator<. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \return The end of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_difference to compute the + * set difference of two sets of integers sorted in ascending order using the \p thrust::host execution + * policy for parallelization: + * + * \code + * #include + * #include + * ... + * int A1[7] = {0, 1, 3, 4, 5, 6, 9}; + * int A2[5] = {1, 3, 5, 7, 9}; + * + * int result[3]; + * + * int *result_end = thrust::set_difference(thrust::host, A1, A1 + 7, A2, A2 + 5, result); + * // result is now {0, 4, 6} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_difference + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template +__host__ __device__ + OutputIterator set_difference(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result); + + +/*! \p set_difference constructs a sorted range that is the set difference of the sorted + * ranges [first1, last1) and [first2, last2). The return value is the + * end of the output range. 
+ * + * In the simplest case, \p set_difference performs the "difference" operation from set + * theory: the output range contains a copy of every element that is contained in + * [first1, last1) and not contained in [first2, last1). The general case + * is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [first1, last1) contains \c m elements + * that are equivalent to each other and if [first2, last2) contains \c n + * elements that are equivalent to them, the last max(m-n,0) elements from + * [first1, last1) range shall be copied to the output range. + * + * This version of \p set_difference compares elements using \c operator<. + * + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \return The end of the output range. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. 
+ * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_difference to compute the + * set difference of two sets of integers sorted in ascending order. + * + * \code + * #include + * ... + * int A1[7] = {0, 1, 3, 4, 5, 6, 9}; + * int A2[5] = {1, 3, 5, 7, 9}; + * + * int result[3]; + * + * int *result_end = thrust::set_difference(A1, A1 + 7, A2, A2 + 5, result); + * // result is now {0, 4, 6} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_difference + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template + OutputIterator set_difference(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result); + + +/*! \p set_difference constructs a sorted range that is the set difference of the sorted + * ranges [first1, last1) and [first2, last2). The return value is the + * end of the output range. + * + * In the simplest case, \p set_difference performs the "difference" operation from set + * theory: the output range contains a copy of every element that is contained in + * [first1, last1) and not contained in [first2, last1). The general case + * is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [first1, last1) contains \c m elements + * that are equivalent to each other and if [first2, last2) contains \c n + * elements that are equivalent to them, the last max(m-n,0) elements from + * [first1, last1) range shall be copied to the output range. + * + * This version of \p set_difference compares elements using a function object \p comp. + * + * The algorithm's execution is parallelized as determined by \p exec. 
+ * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \param comp Comparison operator. + * \return The end of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_difference to compute the + * set difference of two sets of integers sorted in descending order using the \p thrust::host execution + * policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * int A1[7] = {9, 6, 5, 4, 3, 1, 0}; + * int A2[5] = {9, 7, 5, 3, 1}; + * + * int result[3]; + * + * int *result_end = thrust::set_difference(thrust::host, A1, A1 + 7, A2, A2 + 5, result, thrust::greater()); + * // result is now {6, 4, 0} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_difference + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template +__host__ __device__ + OutputIterator set_difference(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result, + StrictWeakCompare comp); + + +/*! \p set_difference constructs a sorted range that is the set difference of the sorted + * ranges [first1, last1) and [first2, last2). The return value is the + * end of the output range. + * + * In the simplest case, \p set_difference performs the "difference" operation from set + * theory: the output range contains a copy of every element that is contained in + * [first1, last1) and not contained in [first2, last1). The general case + * is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [first1, last1) contains \c m elements + * that are equivalent to each other and if [first2, last2) contains \c n + * elements that are equivalent to them, the last max(m-n,0) elements from + * [first1, last1) range shall be copied to the output range. + * + * This version of \p set_difference compares elements using a function object \p comp. + * + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \param comp Comparison operator. 
+ * \return The end of the output range. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_difference to compute the + * set difference of two sets of integers sorted in descending order. + * + * \code + * #include + * #include + * ... + * int A1[7] = {9, 6, 5, 4, 3, 1, 0}; + * int A2[5] = {9, 7, 5, 3, 1}; + * + * int result[3]; + * + * int *result_end = thrust::set_difference(A1, A1 + 7, A2, A2 + 5, result, thrust::greater()); + * // result is now {6, 4, 0} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_difference + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template + OutputIterator set_difference(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result, + StrictWeakCompare comp); + + +/*! \p set_intersection constructs a sorted range that is the + * intersection of sorted ranges [first1, last1) and + * [first2, last2). The return value is the end of the + * output range. 
+ * + * In the simplest case, \p set_intersection performs the + * "intersection" operation from set theory: the output range + * contains a copy of every element that is contained in both + * [first1, last1) and [first2, last2). The + * general case is more complicated, because the input ranges may + * contain duplicate elements. The generalization is that if a value + * appears \c m times in [first1, last1) and \c n times in + * [first2, last2) (where \c m may be zero), then it + * appears min(m,n) times in the output range. + * \p set_intersection is stable, meaning that both elements are + * copied from the first range rather than the second, and that the + * relative order of elements in the output range is the same as in + * the first input range. + * + * This version of \p set_intersection compares objects using + * \c operator<. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \return The end of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_intersection to compute the + * set intersection of two sets of integers sorted in ascending order using the \p thrust::host execution + * policy for parallelization: + * + * \code + * #include + * #include + * ... + * int A1[6] = {1, 3, 5, 7, 9, 11}; + * int A2[7] = {1, 1, 2, 3, 5, 8, 13}; + * + * int result[7]; + * + * int *result_end = thrust::set_intersection(thrust::host, A1, A1 + 6, A2, A2 + 7, result); + * // result is now {1, 3, 5} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_intersection + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template +__host__ __device__ + OutputIterator set_intersection(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result); + + +/*! \p set_intersection constructs a sorted range that is the + * intersection of sorted ranges [first1, last1) and + * [first2, last2). The return value is the end of the + * output range. 
+ * + * In the simplest case, \p set_intersection performs the + * "intersection" operation from set theory: the output range + * contains a copy of every element that is contained in both + * [first1, last1) and [first2, last2). The + * general case is more complicated, because the input ranges may + * contain duplicate elements. The generalization is that if a value + * appears \c m times in [first1, last1) and \c n times in + * [first2, last2) (where \c m may be zero), then it + * appears min(m,n) times in the output range. + * \p set_intersection is stable, meaning that both elements are + * copied from the first range rather than the second, and that the + * relative order of elements in the output range is the same as in + * the first input range. + * + * This version of \p set_intersection compares objects using + * \c operator<. + * + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \return The end of the output range. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_intersection to compute the + * set intersection of two sets of integers sorted in ascending order. + * + * \code + * #include + * ... + * int A1[6] = {1, 3, 5, 7, 9, 11}; + * int A2[7] = {1, 1, 2, 3, 5, 8, 13}; + * + * int result[7]; + * + * int *result_end = thrust::set_intersection(A1, A1 + 6, A2, A2 + 7, result); + * // result is now {1, 3, 5} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_intersection + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template + OutputIterator set_intersection(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result); + + +/*! \p set_intersection constructs a sorted range that is the + * intersection of sorted ranges [first1, last1) and + * [first2, last2). The return value is the end of the + * output range. + * + * In the simplest case, \p set_intersection performs the + * "intersection" operation from set theory: the output range + * contains a copy of every element that is contained in both + * [first1, last1) and [first2, last2). 
The + * general case is more complicated, because the input ranges may + * contain duplicate elements. The generalization is that if a value + * appears \c m times in [first1, last1) and \c n times in + * [first2, last2) (where \c m may be zero), then it + * appears min(m,n) times in the output range. + * \p set_intersection is stable, meaning that both elements are + * copied from the first range rather than the second, and that the + * relative order of elements in the output range is the same as in + * the first input range. + * + * This version of \p set_intersection compares elements using a function object \p comp. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \param comp Comparison operator. + * \return The end of the output range. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. + * \pre The resulting range shall not overlap with either input range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * The following code snippet demonstrates how to use \p set_intersection to compute + * the set intersection of sets of integers sorted in descending order using the \p thrust::host execution + * policy for parallelization: + * + * \code + * #include + * #include + * ... + * int A1[6] = {11, 9, 7, 5, 3, 1}; + * int A2[7] = {13, 8, 5, 3, 2, 1, 1}; + * + * int result[3]; + * + * int *result_end = thrust::set_intersection(thrust::host, A1, A1 + 6, A2, A2 + 7, result, thrust::greater()); + * // result is now {5, 3, 1} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_intersection + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template +__host__ __device__ + OutputIterator set_intersection(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result, + StrictWeakCompare comp); + + +/*! \p set_intersection constructs a sorted range that is the + * intersection of sorted ranges [first1, last1) and + * [first2, last2). The return value is the end of the + * output range. + * + * In the simplest case, \p set_intersection performs the + * "intersection" operation from set theory: the output range + * contains a copy of every element that is contained in both + * [first1, last1) and [first2, last2). 
The + * general case is more complicated, because the input ranges may + * contain duplicate elements. The generalization is that if a value + * appears \c m times in [first1, last1) and \c n times in + * [first2, last2) (where \c m may be zero), then it + * appears min(m,n) times in the output range. + * \p set_intersection is stable, meaning that both elements are + * copied from the first range rather than the second, and that the + * relative order of elements in the output range is the same as in + * the first input range. + * + * This version of \p set_intersection compares elements using a function object \p comp. + * + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \param comp Comparison operator. + * \return The end of the output range. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. + * \pre The resulting range shall not overlap with either input range. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * The following code snippet demonstrates how to use \p set_intersection to compute + * the set intersection of sets of integers sorted in descending order. + * + * \code + * #include + * ... + * int A1[6] = {11, 9, 7, 5, 3, 1}; + * int A2[7] = {13, 8, 5, 3, 2, 1, 1}; + * + * int result[3]; + * + * int *result_end = thrust::set_intersection(A1, A1 + 6, A2, A2 + 7, result, thrust::greater()); + * // result is now {5, 3, 1} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_intersection + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template + OutputIterator set_intersection(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result, + StrictWeakCompare comp); + + +/*! \p set_symmetric_difference constructs a sorted range that is the set symmetric + * difference of the sorted ranges [first1, last1) and [first2, last2). + * The return value is the end of the output range. + * + * In the simplest case, \p set_symmetric_difference performs a set theoretic calculation: + * it constructs the union of the two sets A - B and B - A, where A and B are the two + * input ranges. 
That is, the output range contains a copy of every element that is + * contained in [first1, last1) but not [first2, last1), and a copy of + * every element that is contained in [first2, last2) but not [first1, last1). + * The general case is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [first1, last1) contains \c m elements that are + * equivalent to each other and [first2, last1) contains \c n elements that are + * equivalent to them, then |m - n| of those elements shall be copied to the output + * range: the last m - n elements from [first1, last1) if m > n, and + * the last n - m of these elements from [first2, last2) if m < n. + * + * This version of \p set_union compares elements using \c operator<. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \return The end of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_symmetric_difference to compute + * the symmetric difference of two sets of integers sorted in ascending order using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * int A1[7] = {0, 1, 2, 2, 4, 6, 7}; + * int A2[5] = {1, 1, 2, 5, 8}; + * + * int result[6]; + * + * int *result_end = thrust::set_symmetric_difference(thrust::host, A1, A1 + 7, A2, A2 + 5, result); + * // result = {0, 4, 5, 6, 7, 8} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_symmetric_difference + * \see \p merge + * \see \p includes + * \see \p set_difference + * \see \p set_union + * \see \p set_intersection + * \see \p sort + * \see \p is_sorted + */ +template +__host__ __device__ + OutputIterator set_symmetric_difference(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result); + + +/*! \p set_symmetric_difference constructs a sorted range that is the set symmetric + * difference of the sorted ranges [first1, last1) and [first2, last2). + * The return value is the end of the output range. 
+ * + * In the simplest case, \p set_symmetric_difference performs a set theoretic calculation: + * it constructs the union of the two sets A - B and B - A, where A and B are the two + * input ranges. That is, the output range contains a copy of every element that is + * contained in [first1, last1) but not [first2, last1), and a copy of + * every element that is contained in [first2, last2) but not [first1, last1). + * The general case is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [first1, last1) contains \c m elements that are + * equivalent to each other and [first2, last1) contains \c n elements that are + * equivalent to them, then |m - n| of those elements shall be copied to the output + * range: the last m - n elements from [first1, last1) if m > n, and + * the last n - m of these elements from [first2, last2) if m < n. + * + * This version of \p set_union compares elements using \c operator<. + * + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \return The end of the output range. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_symmetric_difference to compute + * the symmetric difference of two sets of integers sorted in ascending order. + * + * \code + * #include + * ... + * int A1[7] = {0, 1, 2, 2, 4, 6, 7}; + * int A2[5] = {1, 1, 2, 5, 8}; + * + * int result[6]; + * + * int *result_end = thrust::set_symmetric_difference(A1, A1 + 7, A2, A2 + 5, result); + * // result = {0, 4, 5, 6, 7, 8} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_symmetric_difference + * \see \p merge + * \see \p includes + * \see \p set_difference + * \see \p set_union + * \see \p set_intersection + * \see \p sort + * \see \p is_sorted + */ +template + OutputIterator set_symmetric_difference(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result); + + +/*! \p set_symmetric_difference constructs a sorted range that is the set symmetric + * difference of the sorted ranges [first1, last1) and [first2, last2). + * The return value is the end of the output range. + * + * In the simplest case, \p set_symmetric_difference performs a set theoretic calculation: + * it constructs the union of the two sets A - B and B - A, where A and B are the two + * input ranges. 
That is, the output range contains a copy of every element that is + * contained in [first1, last1) but not [first2, last1), and a copy of + * every element that is contained in [first2, last2) but not [first1, last1). + * The general case is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [first1, last1) contains \c m elements that are + * equivalent to each other and [first2, last1) contains \c n elements that are + * equivalent to them, then |m - n| of those elements shall be copied to the output + * range: the last m - n elements from [first1, last1) if m > n, and + * the last n - m of these elements from [first2, last2) if m < n. + * + * This version of \p set_union compares elements using a function object \p comp. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \param comp Comparison operator. + * \return The end of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_symmetric_difference to compute + * the symmetric difference of two sets of integers sorted in descending order using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * int A1[7] = {7, 6, 4, 2, 2, 1, 0}; + * int A2[5] = {8, 5, 2, 1, 1}; + * + * int result[6]; + * + * int *result_end = thrust::set_symmetric_difference(thrust::host, A1, A1 + 7, A2, A2 + 5, result); + * // result = {8, 7, 6, 5, 4, 0} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_symmetric_difference + * \see \p merge + * \see \p includes + * \see \p set_difference + * \see \p set_union + * \see \p set_intersection + * \see \p sort + * \see \p is_sorted + */ +template +__host__ __device__ + OutputIterator set_symmetric_difference(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result, + StrictWeakCompare comp); + + +/*! \p set_symmetric_difference constructs a sorted range that is the set symmetric + * difference of the sorted ranges [first1, last1) and [first2, last2). + * The return value is the end of the output range. 
+ * + * In the simplest case, \p set_symmetric_difference performs a set theoretic calculation: + * it constructs the union of the two sets A - B and B - A, where A and B are the two + * input ranges. That is, the output range contains a copy of every element that is + * contained in [first1, last1) but not [first2, last2), and a copy of + * every element that is contained in [first2, last2) but not [first1, last1). + * The general case is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [first1, last1) contains \c m elements that are + * equivalent to each other and [first2, last2) contains \c n elements that are + * equivalent to them, then |m - n| of those elements shall be copied to the output + * range: the last m - n elements from [first1, last1) if m > n, and + * the last n - m of these elements from [first2, last2) if m < n. + * + * This version of \p set_symmetric_difference compares elements using a function object \p comp. + * + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \param comp Comparison operator. + * \return The end of the output range. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_symmetric_difference to compute + * the symmetric difference of two sets of integers sorted in descending order. + * + * \code + * #include + * ... + * int A1[7] = {7, 6, 4, 2, 2, 1, 0}; + * int A2[5] = {8, 5, 2, 1, 1}; + * + * int result[6]; + * + * int *result_end = thrust::set_symmetric_difference(A1, A1 + 7, A2, A2 + 5, result); + * // result = {8, 7, 6, 5, 4, 0} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_symmetric_difference + * \see \p merge + * \see \p includes + * \see \p set_difference + * \see \p set_union + * \see \p set_intersection + * \see \p sort + * \see \p is_sorted + */ +template + OutputIterator set_symmetric_difference(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result, + StrictWeakCompare comp); + + +/*! \p set_union constructs a sorted range that is the union of the sorted ranges + * [first1, last1) and [first2, last2). The return value is the + * end of the output range. + * + * In the simplest case, \p set_union performs the "union" operation from set + * theory: the output range contains a copy of every element that is contained in + * [first1, last1), [first2, last1), or both. 
The general case + * is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [first1, last1) contains \c m elements + * that are equivalent to each other and if [first2, last2) contains \c n + * elements that are equivalent to them, then all \c m elements from the first + * range shall be copied to the output range, in order, and then max(n - m, 0) + * elements from the second range shall be copied to the output, in order. + * + * This version of \p set_union compares elements using \c operator<. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \return The end of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_union to compute the union of + * two sets of integers sorted in ascending order using the \p thrust::host execution policy for + * parallelization: + * + * \code + * #include <thrust/set_operations.h> + * #include <thrust/execution_policy.h> + * ... + * int A1[7] = {0, 2, 4, 6, 8, 10, 12}; + * int A2[5] = {1, 3, 5, 7, 9}; + * + * int result[12]; + * + * int *result_end = thrust::set_union(thrust::host, A1, A1 + 7, A2, A2 + 5, result); + * // result = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_union + * \see \p merge + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template +__host__ __device__ + OutputIterator set_union(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result); + + +/*! \p set_union constructs a sorted range that is the union of the sorted ranges + * [first1, last1) and [first2, last2). The return value is the + * end of the output range. + * + * In the simplest case, \p set_union performs the "union" operation from set + * theory: the output range contains a copy of every element that is contained in + * [first1, last1), [first2, last2), or both. The general case + * is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if [first1, last1) contains \c m elements + * that are equivalent to each other and if [first2, last2) contains \c n + * elements that are equivalent to them, then all \c m elements from the first + * range shall be copied to the output range, in order, and then max(n - m, 0) + * elements from the second range shall be copied to the output, in order. + * + * This version of \p set_union compares elements using \c operator<. + * + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \return The end of the output range. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to operator<. + * \pre The resulting range shall not overlap with either input range. 
+ * + * The following code snippet demonstrates how to use \p set_union to compute the union of + * two sets of integers sorted in ascending order. + * + * \code + * #include + * ... + * int A1[7] = {0, 2, 4, 6, 8, 10, 12}; + * int A2[5] = {1, 3, 5, 7, 9}; + * + * int result[11]; + * + * int *result_end = thrust::set_union(A1, A1 + 7, A2, A2 + 5, result); + * // result = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_union + * \see \p merge + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template + OutputIterator set_union(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result); + + +/*! \p set_union constructs a sorted range that is the union of the sorted ranges + * [first1, last1) and [first2, last2). The return value is the + * end of the output range. + * + * In the simplest case, \p set_union performs the "union" operation from set + * theory: the output range contains a copy of every element that is contained in + * [first1, last1), [first2, last1), or both. The general case + * is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [first1, last1) contains \c m elements + * that are equivalent to each other and if [first2, last2) contains \c n + * elements that are equivalent to them, then all \c m elements from the first + * range shall be copied to the output range, in order, and then max(n - m, 0) + * elements from the second range shall be copied to the output, in order. + * + * This version of \p set_union compares elements using a function object \p comp. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input range. 
+ * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. + * \param result The beginning of the output range. + * \param comp Comparison operator. + * \return The end of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_union to compute the union of + * two sets of integers sorted in ascending order using the \p thrust::host execution policy for + * parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * int A1[7] = {12, 10, 8, 6, 4, 2, 0}; + * int A2[5] = {9, 7, 5, 3, 1}; + * + * int result[11]; + * + * int *result_end = thrust::set_union(thrust::host, A1, A1 + 7, A2, A2 + 5, result, thrust::greater()); + * // result = {12, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_union + * \see \p merge + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template +__host__ __device__ + OutputIterator set_union(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result, + StrictWeakCompare comp); + + +/*! \p set_union constructs a sorted range that is the union of the sorted ranges + * [first1, last1) and [first2, last2). The return value is the + * end of the output range. + * + * In the simplest case, \p set_union performs the "union" operation from set + * theory: the output range contains a copy of every element that is contained in + * [first1, last1), [first2, last1), or both. The general case + * is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [first1, last1) contains \c m elements + * that are equivalent to each other and if [first2, last2) contains \c n + * elements that are equivalent to them, then all \c m elements from the first + * range shall be copied to the output range, in order, and then max(n - m, 0) + * elements from the second range shall be copied to the output, in order. + * + * This version of \p set_union compares elements using a function object \p comp. + * + * \param first1 The beginning of the first input range. + * \param last1 The end of the first input range. + * \param first2 The beginning of the second input range. + * \param last2 The end of the second input range. 
+ * \param result The beginning of the output range. + * \param comp Comparison operator. + * \return The end of the output range. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1's \c value_type is convertable to \p StrictWeakCompare's \c first_argument_type. + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2's \c value_type is convertable to \p StrictWeakCompare's \c second_argument_type. + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [first1, last1) and [first2, last2) shall be sorted with respect to \p comp. + * \pre The resulting range shall not overlap with either input range. + * + * The following code snippet demonstrates how to use \p set_union to compute the union of + * two sets of integers sorted in ascending order. + * + * \code + * #include + * #include + * ... + * int A1[7] = {12, 10, 8, 6, 4, 2, 0}; + * int A2[5] = {9, 7, 5, 3, 1}; + * + * int result[11]; + * + * int *result_end = thrust::set_union(A1, A1 + 7, A2, A2 + 5, result, thrust::greater()); + * // result = {12, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0} + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/set_union + * \see \p merge + * \see \p includes + * \see \p set_union + * \see \p set_intersection + * \see \p set_symmetric_difference + * \see \p sort + * \see \p is_sorted + */ +template + OutputIterator set_union(InputIterator1 first1, + InputIterator1 last1, + InputIterator2 first2, + InputIterator2 last2, + OutputIterator result, + StrictWeakCompare comp); + + +/*! \p set_difference_by_key performs a key-value difference operation from set theory. 
+ * \p set_difference_by_key constructs a sorted range that is the difference of the sorted + * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated + * with each element from the input and output key ranges is a value element. The associated input + * value ranges need not be sorted. + * + * In the simplest case, \p set_difference_by_key performs the "difference" operation from set + * theory: the keys output range contains a copy of every element that is contained in + * [keys_first1, keys_last1) and not contained in [keys_first2, keys_last2). + * The general case is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [keys_first1, keys_last1) contains \c m elements + * that are equivalent to each other and if [keys_first2, keys_last2) contains \c n + * elements that are equivalent to them, the last max(m-n,0) elements from + * [keys_first1, keys_last1) range shall be copied to the output range. + * + * Each time a key element is copied from [keys_first1, keys_last1) or + * [keys_first2, keys_last2) is copied to the keys output range, the + * corresponding value element is copied from the corresponding values input range (beginning at + * \p values_first1 or \p values_first2) to the values output range. + * + * This version of \p set_difference_by_key compares key elements using \c operator<. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. 
+ * \param keys_result The beginning of the output range of keys. + * \param values_result The beginning of the output range of values. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<. + * \pre The resulting ranges shall not overlap with any input range. 
+ * + * The following code snippet demonstrates how to use \p set_difference_by_key to compute the + * set difference of two sets of integers sorted in ascending order with their values using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include <thrust/set_operations.h> + * #include <thrust/execution_policy.h> + * ... + * int A_keys[7] = {0, 1, 3, 4, 5, 6, 9}; + * int A_vals[7] = {0, 0, 0, 0, 0, 0, 0}; + * + * int B_keys[5] = {1, 3, 5, 7, 9}; + * int B_vals[5] = {1, 1, 1, 1, 1}; + * + * int keys_result[3]; + * int vals_result[3]; + * + * thrust::pair<int*,int*> end = thrust::set_difference_by_key(thrust::host, A_keys, A_keys + 7, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result); + * // keys_result is now {0, 4, 6} + * // vals_result is now {0, 0, 0} + * \endcode + * + * \see \p set_union_by_key + * \see \p set_intersection_by_key + * \see \p set_symmetric_difference_by_key + * \see \p sort_by_key + * \see \p is_sorted + */ +template +__host__ __device__ + thrust::pair + set_difference_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 keys_first1, + InputIterator1 keys_last1, + InputIterator2 keys_first2, + InputIterator2 keys_last2, + InputIterator3 values_first1, + InputIterator4 values_first2, + OutputIterator1 keys_result, + OutputIterator2 values_result); + + +/*! \p set_difference_by_key performs a key-value difference operation from set theory. + * \p set_difference_by_key constructs a sorted range that is the difference of the sorted + * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated + * with each element from the input and output key ranges is a value element. The associated input + * value ranges need not be sorted. + * + * In the simplest case, \p set_difference_by_key performs the "difference" operation from set + * theory: the keys output range contains a copy of every element that is contained in + * [keys_first1, keys_last1) and not contained in [keys_first2, keys_last2).
+ * The general case is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [keys_first1, keys_last1) contains \c m elements + * that are equivalent to each other and if [keys_first2, keys_last2) contains \c n + * elements that are equivalent to them, the last max(m-n,0) elements from + * [keys_first1, keys_last1) range shall be copied to the output range. + * + * Each time a key element is copied from [keys_first1, keys_last1) or + * [keys_first2, keys_last2) is copied to the keys output range, the + * corresponding value element is copied from the corresponding values input range (beginning at + * \p values_first1 or \p values_first2) to the values output range. + * + * This version of \p set_difference_by_key compares key elements using \c operator<. + * + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the output range of keys. + * \param values_result The beginning of the output range of values. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use \p set_difference_by_key to compute the + * set difference of two sets of integers sorted in ascending order with their values. + * + * \code + * #include + * ... 
+ * int A_keys[7] = {0, 1, 3, 4, 5, 6, 9}; + * int A_vals[7] = {0, 0, 0, 0, 0, 0, 0}; + * + * int B_keys[5] = {1, 3, 5, 7, 9}; + * int B_vals[5] = {1, 1, 1, 1, 1}; + * + * int keys_result[3]; + * int vals_result[3]; + * + * thrust::pair<int*,int*> end = thrust::set_difference_by_key(A_keys, A_keys + 7, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result); + * // keys_result is now {0, 4, 6} + * // vals_result is now {0, 0, 0} + * \endcode + * + * \see \p set_union_by_key + * \see \p set_intersection_by_key + * \see \p set_symmetric_difference_by_key + * \see \p sort_by_key + * \see \p is_sorted + */ +template + thrust::pair + set_difference_by_key(InputIterator1 keys_first1, + InputIterator1 keys_last1, + InputIterator2 keys_first2, + InputIterator2 keys_last2, + InputIterator3 values_first1, + InputIterator4 values_first2, + OutputIterator1 keys_result, + OutputIterator2 values_result); + + +/*! \p set_difference_by_key performs a key-value difference operation from set theory. + * \p set_difference_by_key constructs a sorted range that is the difference of the sorted + * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated + * with each element from the input and output key ranges is a value element. The associated input + * value ranges need not be sorted. + * + * In the simplest case, \p set_difference_by_key performs the "difference" operation from set + * theory: the keys output range contains a copy of every element that is contained in + * [keys_first1, keys_last1) and not contained in [keys_first2, keys_last2). + * The general case is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [keys_first1, keys_last1) contains \c m elements + * that are equivalent to each other and if [keys_first2, keys_last2) contains \c n + * elements that are equivalent to them, the last max(m-n,0) elements from + * [keys_first1, keys_last1) range shall be copied to the output range.
+ * + * Each time a key element is copied from [keys_first1, keys_last1) or + * [keys_first2, keys_last2) is copied to the keys output range, the + * corresponding value element is copied from the corresponding values input range (beginning at + * \p values_first1 or \p values_first2) to the values output range. + * + * This version of \p set_difference_by_key compares key elements using a function object \p comp. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the output range of keys. + * \param values_result The beginning of the output range of values. + * \param comp Comparison operator. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use \p set_difference_by_key to compute the + * set difference of two sets of integers sorted in descending order with their values using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * int A_keys[7] = {9, 6, 5, 4, 3, 1, 0}; + * int A_vals[7] = {0, 0, 0, 0, 0, 0, 0}; + * + * int B_keys[5] = {9, 7, 5, 3, 1}; + * int B_vals[5] = {1, 1, 1, 1, 1}; + * + * int keys_result[3]; + * int vals_result[3]; + * + * thrust::pair<int*,int*> end = thrust::set_difference_by_key(thrust::host, A_keys, A_keys + 7, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result, thrust::greater<int>()); + * // keys_result is now {6, 4, 0} + * // vals_result is now {0, 0, 0} + * \endcode + * + * \see \p set_union_by_key + * \see \p set_intersection_by_key + * \see \p set_symmetric_difference_by_key + * \see \p sort_by_key + * \see \p is_sorted + */ +template +__host__ __device__ + thrust::pair + set_difference_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 keys_first1, + InputIterator1 keys_last1, + InputIterator2 keys_first2, + InputIterator2 keys_last2, + InputIterator3 values_first1, + InputIterator4 values_first2, + OutputIterator1 keys_result, + OutputIterator2 values_result, + StrictWeakCompare comp); + + +/*! \p set_difference_by_key performs a key-value difference operation from set theory. + * \p set_difference_by_key constructs a sorted range that is the difference of the sorted + * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated + * with each element from the input and output key ranges is a value element. The associated input + * value ranges need not be sorted. + * + * In the simplest case, \p set_difference_by_key performs the "difference" operation from set + * theory: the keys output range contains a copy of every element that is contained in + * [keys_first1, keys_last1) and not contained in [keys_first2, keys_last2). + * The general case is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if [keys_first1, keys_last1) contains \c m elements
+ * that are equivalent to each other and if [keys_first2, keys_last2) contains \c n
+ * elements that are equivalent to them, the last max(m-n,0) elements from
+ * [keys_first1, keys_last1) range shall be copied to the output range.
+ *
+ * Each time a key element from [keys_first1, keys_last1) or
+ * [keys_first2, keys_last2) is copied to the keys output range, the
+ * corresponding value element is copied from the corresponding values input range (beginning at
+ * \p values_first1 or \p values_first2) to the values output range.
+ *
+ * This version of \p set_difference_by_key compares key elements using a function object \p comp.
+ *
+ * \param keys_first1 The beginning of the first input range of keys.
+ * \param keys_last1 The end of the first input range of keys.
+ * \param keys_first2 The beginning of the second input range of keys.
+ * \param keys_last2 The end of the second input range of keys.
+ * \param values_first1 The beginning of the first input range of values.
+ * \param values_first2 The beginning of the second input range of values.
+ * \param keys_result The beginning of the output range of keys.
+ * \param values_result The beginning of the output range of values.
+ * \param comp Comparison operator.
+ * \return A \p pair \c p such that p.first is the end of the output range of keys,
+ *         and such that p.second is the end of the output range of values.
+ *
+ * \tparam InputIterator1 is a model of Input Iterator,
+ *         \p InputIterator1 and \p InputIterator2 have the same \c value_type,
+ *         \p InputIterator1's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator1's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator2 is a model of Input Iterator,
+ *         \p InputIterator2 and \p InputIterator1 have the same \c value_type,
+ *         \p InputIterator2's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator2's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator3 is a model of Input Iterator,
+ *         and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types.
+ * \tparam InputIterator4 is a model of Input Iterator,
+ *         and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types.
+ * \tparam OutputIterator1 is a model of Output Iterator.
+ * \tparam OutputIterator2 is a model of Output Iterator.
+ * \tparam StrictWeakCompare is a model of Strict Weak Ordering.
+ *
+ * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp.
+ * \pre The resulting ranges shall not overlap with any input range.
+ *
+ * The following code snippet demonstrates how to use \p set_difference_by_key to compute the
+ * set difference of two sets of integers sorted in descending order with their values.
+ *
+ * \code
+ * #include <thrust/set_operations.h>
+ * #include <thrust/functional.h>
+ * ...
+ * // NOTE(review): array sizes corrected to match their 7 initializers, and the
+ * // expected results recomputed for the descending (thrust::greater) ordering.
+ * int A_keys[7] = {9, 6, 5, 4, 3, 1, 0};
+ * int A_vals[7] = {0, 0, 0, 0, 0, 0, 0};
+ *
+ * int B_keys[5] = {9, 7, 5, 3, 1};
+ * int B_vals[5] = {1, 1, 1, 1, 1};
+ *
+ * int keys_result[3];
+ * int vals_result[3];
+ *
+ * thrust::pair<int*,int*> end = thrust::set_difference_by_key(A_keys, A_keys + 7, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result, thrust::greater<int>());
+ * // keys_result is now {6, 4, 0}
+ * // vals_result is now {0, 0, 0}
+ * \endcode
+ *
+ * \see \p set_union_by_key
+ * \see \p set_intersection_by_key
+ * \see \p set_symmetric_difference_by_key
+ * \see \p sort_by_key
+ * \see \p is_sorted
+ */
+template<typename InputIterator1,
+         typename InputIterator2,
+         typename InputIterator3,
+         typename InputIterator4,
+         typename OutputIterator1,
+         typename OutputIterator2,
+         typename StrictWeakCompare>
+  thrust::pair<OutputIterator1,OutputIterator2>
+    set_difference_by_key(InputIterator1 keys_first1,
+                          InputIterator1 keys_last1,
+                          InputIterator2 keys_first2,
+                          InputIterator2 keys_last2,
+                          InputIterator3 values_first1,
+                          InputIterator4 values_first2,
+                          OutputIterator1 keys_result,
+                          OutputIterator2 values_result,
+                          StrictWeakCompare comp);
+
+
+/*! \p set_intersection_by_key performs a key-value intersection operation from set theory.
+ * \p set_intersection_by_key constructs a sorted range that is the intersection of the sorted
+ * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated
+ * with each element from the input and output key ranges is a value element. The associated input
+ * value ranges need not be sorted.
+ *
+ * In the simplest case, \p set_intersection_by_key performs the "intersection" operation from set
+ * theory: the keys output range contains a copy of every element that is contained in both
+ * [keys_first1, keys_last1) and [keys_first2, keys_last2).
+ * The general case is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if an element appears \c m times in [keys_first1, keys_last1)
+ * and \c n times in [keys_first2, keys_last2) (where \c m may be zero), then it
+ * appears min(m,n) times in the keys output range.
+ * \p set_intersection_by_key is stable, meaning both that elements are copied from the first
+ * input range rather than the second, and that the relative order of elements in the output range
+ * is the same as the first input range.
+ *
+ * Each time a key element is copied from [keys_first1, keys_last1) to the keys output range,
+ * the corresponding value element is copied from [values_first1, values_last1) to the values
+ * output range.
+ *
+ * This version of \p set_intersection_by_key compares objects using \c operator<.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param keys_first1 The beginning of the first input range of keys.
+ * \param keys_last1 The end of the first input range of keys.
+ * \param keys_first2 The beginning of the second input range of keys.
+ * \param keys_last2 The end of the second input range of keys.
+ * \param values_first1 The beginning of the first input range of values.
+ * \param keys_result The beginning of the output range of keys.
+ * \param values_result The beginning of the output range of values.
+ * \return A \p pair \c p such that p.first is the end of the output range of keys,
+ *         and such that p.second is the end of the output range of values.
+ *
+ * \note Unlike the other key-value set operations, \p set_intersection_by_key is unique in that it has no
+ *       \c values_first2 parameter because elements from the second input range are never copied to the output range.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam InputIterator1 is a model of Input Iterator,
+ *         \p InputIterator1 and \p InputIterator2 have the same \c value_type,
+ *         \p InputIterator1's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator1's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator2 is a model of Input Iterator,
+ *         \p InputIterator2 and \p InputIterator1 have the same \c value_type,
+ *         \p InputIterator2's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator2's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator3 is a model of Input Iterator,
+ *         and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types.
+ * \tparam OutputIterator1 is a model of Output Iterator.
+ * \tparam OutputIterator2 is a model of Output Iterator.
+ *
+ * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<.
+ * \pre The resulting ranges shall not overlap with any input range.
+ *
+ * The following code snippet demonstrates how to use \p set_intersection_by_key to compute the
+ * set intersection of two sets of integers sorted in ascending order with their values using the \p thrust::host
+ * execution policy for parallelization:
+ *
+ * \code
+ * #include <thrust/set_operations.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * int A_keys[6] = {1, 3, 5, 7, 9, 11};
+ * int A_vals[6] = {0, 0, 0, 0, 0, 0};
+ *
+ * int B_keys[7] = {1, 1, 2, 3, 5, 8, 13};
+ *
+ * int keys_result[7];
+ * int vals_result[7];
+ *
+ * thrust::pair<int*,int*> end = thrust::set_intersection_by_key(thrust::host, A_keys, A_keys + 6, B_keys, B_keys + 7, A_vals, keys_result, vals_result);
+ *
+ * // keys_result is now {1, 3, 5}
+ * // vals_result is now {0, 0, 0}
+ * \endcode
+ *
+ * \see \p set_union_by_key
+ * \see \p set_difference_by_key
+ * \see \p set_symmetric_difference_by_key
+ * \see \p sort_by_key
+ * \see \p is_sorted
+ */
+template<typename DerivedPolicy,
+         typename InputIterator1,
+         typename InputIterator2,
+         typename InputIterator3,
+         typename OutputIterator1,
+         typename OutputIterator2>
+__host__ __device__
+  thrust::pair<OutputIterator1,OutputIterator2>
+    set_intersection_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                            InputIterator1 keys_first1,
+                            InputIterator1 keys_last1,
+                            InputIterator2 keys_first2,
+                            InputIterator2 keys_last2,
+                            InputIterator3 values_first1,
+                            OutputIterator1 keys_result,
+                            OutputIterator2 values_result);
+
+
+/*! \p set_intersection_by_key performs a key-value intersection operation from set theory.
+ * \p set_intersection_by_key constructs a sorted range that is the intersection of the sorted
+ * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated
+ * with each element from the input and output key ranges is a value element. The associated input
+ * value ranges need not be sorted.
+ *
+ * In the simplest case, \p set_intersection_by_key performs the "intersection" operation from set
+ * theory: the keys output range contains a copy of every element that is contained in both
+ * [keys_first1, keys_last1) and [keys_first2, keys_last2).
+ * The general case is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if an element appears \c m times in [keys_first1, keys_last1)
+ * and \c n times in [keys_first2, keys_last2) (where \c m may be zero), then it
+ * appears min(m,n) times in the keys output range.
+ * \p set_intersection_by_key is stable, meaning both that elements are copied from the first
+ * input range rather than the second, and that the relative order of elements in the output range
+ * is the same as the first input range.
+ *
+ * Each time a key element is copied from [keys_first1, keys_last1) to the keys output range,
+ * the corresponding value element is copied from [values_first1, values_last1) to the values
+ * output range.
+ *
+ * This version of \p set_intersection_by_key compares objects using \c operator<.
+ *
+ * \param keys_first1 The beginning of the first input range of keys.
+ * \param keys_last1 The end of the first input range of keys.
+ * \param keys_first2 The beginning of the second input range of keys.
+ * \param keys_last2 The end of the second input range of keys.
+ * \param values_first1 The beginning of the first input range of values.
+ * \param keys_result The beginning of the output range of keys.
+ * \param values_result The beginning of the output range of values.
+ * \return A \p pair \c p such that p.first is the end of the output range of keys,
+ *         and such that p.second is the end of the output range of values.
+ *
+ * \note Unlike the other key-value set operations, \p set_intersection_by_key is unique in that it has no
+ *       \c values_first2 parameter because elements from the second input range are never copied to the output range.
+ *
+ * \tparam InputIterator1 is a model of Input Iterator,
+ *         \p InputIterator1 and \p InputIterator2 have the same \c value_type,
+ *         \p InputIterator1's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator1's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator2 is a model of Input Iterator,
+ *         \p InputIterator2 and \p InputIterator1 have the same \c value_type,
+ *         \p InputIterator2's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator2's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator3 is a model of Input Iterator,
+ *         and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types.
+ * \tparam OutputIterator1 is a model of Output Iterator.
+ * \tparam OutputIterator2 is a model of Output Iterator.
+ *
+ * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<.
+ * \pre The resulting ranges shall not overlap with any input range.
+ *
+ * The following code snippet demonstrates how to use \p set_intersection_by_key to compute the
+ * set intersection of two sets of integers sorted in ascending order with their values.
+ *
+ * \code
+ * #include <thrust/set_operations.h>
+ * ...
+ * int A_keys[6] = {1, 3, 5, 7, 9, 11};
+ * int A_vals[6] = {0, 0, 0, 0, 0, 0};
+ *
+ * int B_keys[7] = {1, 1, 2, 3, 5, 8, 13};
+ *
+ * int keys_result[7];
+ * int vals_result[7];
+ *
+ * thrust::pair<int*,int*> end = thrust::set_intersection_by_key(A_keys, A_keys + 6, B_keys, B_keys + 7, A_vals, keys_result, vals_result);
+ *
+ * // keys_result is now {1, 3, 5}
+ * // vals_result is now {0, 0, 0}
+ * \endcode
+ *
+ * \see \p set_union_by_key
+ * \see \p set_difference_by_key
+ * \see \p set_symmetric_difference_by_key
+ * \see \p sort_by_key
+ * \see \p is_sorted
+ */
+template<typename InputIterator1,
+         typename InputIterator2,
+         typename InputIterator3,
+         typename OutputIterator1,
+         typename OutputIterator2>
+  thrust::pair<OutputIterator1,OutputIterator2>
+    set_intersection_by_key(InputIterator1 keys_first1,
+                            InputIterator1 keys_last1,
+                            InputIterator2 keys_first2,
+                            InputIterator2 keys_last2,
+                            InputIterator3 values_first1,
+                            OutputIterator1 keys_result,
+                            OutputIterator2 values_result);
+
+
+/*!
\p set_intersection_by_key performs a key-value intersection operation from set theory.
+ * \p set_intersection_by_key constructs a sorted range that is the intersection of the sorted
+ * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated
+ * with each element from the input and output key ranges is a value element. The associated input
+ * value ranges need not be sorted.
+ *
+ * In the simplest case, \p set_intersection_by_key performs the "intersection" operation from set
+ * theory: the keys output range contains a copy of every element that is contained in both
+ * [keys_first1, keys_last1) and [keys_first2, keys_last2).
+ * The general case is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if an element appears \c m times in [keys_first1, keys_last1)
+ * and \c n times in [keys_first2, keys_last2) (where \c m may be zero), then it
+ * appears min(m,n) times in the keys output range.
+ * \p set_intersection_by_key is stable, meaning both that elements are copied from the first
+ * input range rather than the second, and that the relative order of elements in the output range
+ * is the same as the first input range.
+ *
+ * Each time a key element is copied from [keys_first1, keys_last1) to the keys output range,
+ * the corresponding value element is copied from [values_first1, values_last1) to the values
+ * output range.
+ *
+ * This version of \p set_intersection_by_key compares objects using a function object \p comp.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param keys_first1 The beginning of the first input range of keys.
+ * \param keys_last1 The end of the first input range of keys.
+ * \param keys_first2 The beginning of the second input range of keys.
+ * \param keys_last2 The end of the second input range of keys.
+ * \param values_first1 The beginning of the first input range of values.
+ * \param keys_result The beginning of the output range of keys.
+ * \param values_result The beginning of the output range of values.
+ * \param comp Comparison operator.
+ * \return A \p pair \c p such that p.first is the end of the output range of keys,
+ *         and such that p.second is the end of the output range of values.
+ *
+ * \note Unlike the other key-value set operations, \p set_intersection_by_key is unique in that it has no
+ *       \c values_first2 parameter because elements from the second input range are never copied to the output range.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam InputIterator1 is a model of Input Iterator,
+ *         \p InputIterator1 and \p InputIterator2 have the same \c value_type,
+ *         \p InputIterator1's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator1's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator2 is a model of Input Iterator,
+ *         \p InputIterator2 and \p InputIterator1 have the same \c value_type,
+ *         \p InputIterator2's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator2's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator3 is a model of Input Iterator,
+ *         and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types.
+ * \tparam OutputIterator1 is a model of Output Iterator.
+ * \tparam OutputIterator2 is a model of Output Iterator.
+ * \tparam StrictWeakCompare is a model of Strict Weak Ordering.
+ *
+ * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp.
+ * \pre The resulting ranges shall not overlap with any input range.
+ *
+ * The following code snippet demonstrates how to use \p set_intersection_by_key to compute the
+ * set intersection of two sets of integers sorted in descending order with their values using the
+ * \p thrust::host execution policy for parallelization:
+ *
+ * \code
+ * #include <thrust/set_operations.h>
+ * #include <thrust/functional.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * int A_keys[6] = {11, 9, 7, 5, 3, 1};
+ * int A_vals[6] = { 0, 0, 0, 0, 0, 0};
+ *
+ * int B_keys[7] = {13, 8, 5, 3, 2, 1, 1};
+ *
+ * int keys_result[7];
+ * int vals_result[7];
+ *
+ * thrust::pair<int*,int*> end = thrust::set_intersection_by_key(thrust::host, A_keys, A_keys + 6, B_keys, B_keys + 7, A_vals, keys_result, vals_result, thrust::greater<int>());
+ *
+ * // keys_result is now {5, 3, 1}
+ * // vals_result is now {0, 0, 0}
+ * \endcode
+ *
+ * \see \p set_union_by_key
+ * \see \p set_difference_by_key
+ * \see \p set_symmetric_difference_by_key
+ * \see \p sort_by_key
+ * \see \p is_sorted
+ */
+template<typename DerivedPolicy,
+         typename InputIterator1,
+         typename InputIterator2,
+         typename InputIterator3,
+         typename OutputIterator1,
+         typename OutputIterator2,
+         typename StrictWeakCompare>
+__host__ __device__
+  thrust::pair<OutputIterator1,OutputIterator2>
+    set_intersection_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                            InputIterator1 keys_first1,
+                            InputIterator1 keys_last1,
+                            InputIterator2 keys_first2,
+                            InputIterator2 keys_last2,
+                            InputIterator3 values_first1,
+                            OutputIterator1 keys_result,
+                            OutputIterator2 values_result,
+                            StrictWeakCompare comp);
+
+
+/*! \p set_intersection_by_key performs a key-value intersection operation from set theory.
+ * \p set_intersection_by_key constructs a sorted range that is the intersection of the sorted
+ * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated
+ * with each element from the input and output key ranges is a value element. The associated input
+ * value ranges need not be sorted.
+ *
+ * In the simplest case, \p set_intersection_by_key performs the "intersection" operation from set
+ * theory: the keys output range contains a copy of every element that is contained in both
+ * [keys_first1, keys_last1) and [keys_first2, keys_last2).
+ * The general case is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if an element appears \c m times in [keys_first1, keys_last1)
+ * and \c n times in [keys_first2, keys_last2) (where \c m may be zero), then it
+ * appears min(m,n) times in the keys output range.
+ * \p set_intersection_by_key is stable, meaning both that elements are copied from the first
+ * input range rather than the second, and that the relative order of elements in the output range
+ * is the same as the first input range.
+ *
+ * Each time a key element is copied from [keys_first1, keys_last1) to the keys output range,
+ * the corresponding value element is copied from [values_first1, values_last1) to the values
+ * output range.
+ *
+ * This version of \p set_intersection_by_key compares objects using a function object \p comp.
+ *
+ * \param keys_first1 The beginning of the first input range of keys.
+ * \param keys_last1 The end of the first input range of keys.
+ * \param keys_first2 The beginning of the second input range of keys.
+ * \param keys_last2 The end of the second input range of keys.
+ * \param values_first1 The beginning of the first input range of values.
+ * \param keys_result The beginning of the output range of keys.
+ * \param values_result The beginning of the output range of values.
+ * \param comp Comparison operator.
+ * \return A \p pair \c p such that p.first is the end of the output range of keys,
+ *         and such that p.second is the end of the output range of values.
+ *
+ * \note Unlike the other key-value set operations, \p set_intersection_by_key is unique in that it has no
+ *       \c values_first2 parameter because elements from the second input range are never copied to the output range.
+ *
+ * \tparam InputIterator1 is a model of Input Iterator,
+ *         \p InputIterator1 and \p InputIterator2 have the same \c value_type,
+ *         \p InputIterator1's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator1's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator2 is a model of Input Iterator,
+ *         \p InputIterator2 and \p InputIterator1 have the same \c value_type,
+ *         \p InputIterator2's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator2's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator3 is a model of Input Iterator,
+ *         and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types.
+ * \tparam OutputIterator1 is a model of Output Iterator.
+ * \tparam OutputIterator2 is a model of Output Iterator.
+ * \tparam StrictWeakCompare is a model of Strict Weak Ordering.
+ *
+ * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp.
+ * \pre The resulting ranges shall not overlap with any input range.
+ *
+ * The following code snippet demonstrates how to use \p set_intersection_by_key to compute the
+ * set intersection of two sets of integers sorted in descending order with their values.
+ *
+ * \code
+ * #include <thrust/set_operations.h>
+ * #include <thrust/functional.h>
+ * ...
+ * int A_keys[6] = {11, 9, 7, 5, 3, 1};
+ * int A_vals[6] = { 0, 0, 0, 0, 0, 0};
+ *
+ * int B_keys[7] = {13, 8, 5, 3, 2, 1, 1};
+ *
+ * int keys_result[7];
+ * int vals_result[7];
+ *
+ * thrust::pair<int*,int*> end = thrust::set_intersection_by_key(A_keys, A_keys + 6, B_keys, B_keys + 7, A_vals, keys_result, vals_result, thrust::greater<int>());
+ *
+ * // keys_result is now {5, 3, 1}
+ * // vals_result is now {0, 0, 0}
+ * \endcode
+ *
+ * \see \p set_union_by_key
+ * \see \p set_difference_by_key
+ * \see \p set_symmetric_difference_by_key
+ * \see \p sort_by_key
+ * \see \p is_sorted
+ */
+template<typename InputIterator1,
+         typename InputIterator2,
+         typename InputIterator3,
+         typename OutputIterator1,
+         typename OutputIterator2,
+         typename StrictWeakCompare>
+  thrust::pair<OutputIterator1,OutputIterator2>
+    set_intersection_by_key(InputIterator1 keys_first1,
+                            InputIterator1 keys_last1,
+                            InputIterator2 keys_first2,
+                            InputIterator2 keys_last2,
+                            InputIterator3 values_first1,
+                            OutputIterator1 keys_result,
+                            OutputIterator2 values_result,
+                            StrictWeakCompare comp);
+
+
+/*! \p set_symmetric_difference_by_key performs a key-value symmetric difference operation from set theory.
+ * \p set_symmetric_difference_by_key constructs a sorted range that is the symmetric difference of the sorted
+ * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated
+ * with each element from the input and output key ranges is a value element. The associated input
+ * value ranges need not be sorted.
+ *
+ * In the simplest case, \p set_symmetric_difference_by_key performs a set theoretic calculation:
+ * it constructs the union of the two sets A - B and B - A, where A and B are the two
+ * input ranges. That is, the output range contains a copy of every element that is
+ * contained in [keys_first1, keys_last1) but not [keys_first2, keys_last2), and a copy of
+ * every element that is contained in [keys_first2, keys_last2) but not [keys_first1, keys_last1).
+ * The general case is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if [keys_first1, keys_last1) contains \c m elements that are
+ * equivalent to each other and [keys_first2, keys_last2) contains \c n elements that are
+ * equivalent to them, then |m - n| of those elements shall be copied to the output
+ * range: the last m - n elements from [keys_first1, keys_last1) if m > n, and
+ * the last n - m of these elements from [keys_first2, keys_last2) if m < n.
+ *
+ * Each time a key element from [keys_first1, keys_last1) or
+ * [keys_first2, keys_last2) is copied to the keys output range, the
+ * corresponding value element is copied from the corresponding values input range (beginning at
+ * \p values_first1 or \p values_first2) to the values output range.
+ *
+ * This version of \p set_symmetric_difference_by_key compares key elements using \c operator<.
+ *
+ * The algorithm's execution is parallelized as determined by \p exec.
+ *
+ * \param exec The execution policy to use for parallelization.
+ * \param keys_first1 The beginning of the first input range of keys.
+ * \param keys_last1 The end of the first input range of keys.
+ * \param keys_first2 The beginning of the second input range of keys.
+ * \param keys_last2 The end of the second input range of keys.
+ * \param values_first1 The beginning of the first input range of values.
+ * \param values_first2 The beginning of the second input range of values.
+ * \param keys_result The beginning of the output range of keys.
+ * \param values_result The beginning of the output range of values.
+ * \return A \p pair \c p such that p.first is the end of the output range of keys,
+ *         and such that p.second is the end of the output range of values.
+ *
+ * \tparam DerivedPolicy The name of the derived execution policy.
+ * \tparam InputIterator1 is a model of Input Iterator,
+ *         \p InputIterator1 and \p InputIterator2 have the same \c value_type,
+ *         \p InputIterator1's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator1's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator2 is a model of Input Iterator,
+ *         \p InputIterator2 and \p InputIterator1 have the same \c value_type,
+ *         \p InputIterator2's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator2's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator3 is a model of Input Iterator,
+ *         and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types.
+ * \tparam InputIterator4 is a model of Input Iterator,
+ *         and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types.
+ * \tparam OutputIterator1 is a model of Output Iterator.
+ * \tparam OutputIterator2 is a model of Output Iterator.
+ *
+ * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<.
+ * \pre The resulting ranges shall not overlap with any input range.
+ *
+ * The following code snippet demonstrates how to use \p set_symmetric_difference_by_key to compute the
+ * symmetric difference of two sets of integers sorted in ascending order with their values using the
+ * \p thrust::host execution policy for parallelization:
+ *
+ * \code
+ * #include <thrust/set_operations.h>
+ * #include <thrust/execution_policy.h>
+ * ...
+ * // NOTE(review): A_keys/A_vals sized to match their 7 initializers, and the expected
+ * // output recomputed: |m-n| copies of each group of equivalent keys survive, so
+ * // key 1 (one copy from B) and key 2 (one copy from A) also appear in the result.
+ * int A_keys[7] = {0, 1, 2, 2, 4, 6, 7};
+ * int A_vals[7] = {0, 0, 0, 0, 0, 0, 0};
+ *
+ * int B_keys[5] = {1, 1, 2, 5, 8};
+ * int B_vals[5] = {1, 1, 1, 1, 1};
+ *
+ * int keys_result[8];
+ * int vals_result[8];
+ *
+ * thrust::pair<int*,int*> end = thrust::set_symmetric_difference_by_key(thrust::host, A_keys, A_keys + 7, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result);
+ * // keys_result is now {0, 1, 2, 4, 5, 6, 7, 8}
+ * // vals_result is now {0, 1, 0, 0, 1, 0, 0, 1}
+ * \endcode
+ *
+ * \see \p set_union_by_key
+ * \see \p set_intersection_by_key
+ * \see \p set_difference_by_key
+ * \see \p sort_by_key
+ * \see \p is_sorted
+ */
+template<typename DerivedPolicy,
+         typename InputIterator1,
+         typename InputIterator2,
+         typename InputIterator3,
+         typename InputIterator4,
+         typename OutputIterator1,
+         typename OutputIterator2>
+__host__ __device__
+  thrust::pair<OutputIterator1,OutputIterator2>
+    set_symmetric_difference_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                                    InputIterator1 keys_first1,
+                                    InputIterator1 keys_last1,
+                                    InputIterator2 keys_first2,
+                                    InputIterator2 keys_last2,
+                                    InputIterator3 values_first1,
+                                    InputIterator4 values_first2,
+                                    OutputIterator1 keys_result,
+                                    OutputIterator2 values_result);
+
+
+/*! \p set_symmetric_difference_by_key performs a key-value symmetric difference operation from set theory.
+ * \p set_symmetric_difference_by_key constructs a sorted range that is the symmetric difference of the sorted
+ * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated
+ * with each element from the input and output key ranges is a value element. The associated input
+ * value ranges need not be sorted.
+ *
+ * In the simplest case, \p set_symmetric_difference_by_key performs a set theoretic calculation:
+ * it constructs the union of the two sets A - B and B - A, where A and B are the two
+ * input ranges. That is, the output range contains a copy of every element that is
+ * contained in [keys_first1, keys_last1) but not [keys_first2, keys_last2), and a copy of
+ * every element that is contained in [keys_first2, keys_last2) but not [keys_first1, keys_last1).
+ * The general case is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if [keys_first1, keys_last1) contains \c m elements that are
+ * equivalent to each other and [keys_first2, keys_last2) contains \c n elements that are
+ * equivalent to them, then |m - n| of those elements shall be copied to the output
+ * range: the last m - n elements from [keys_first1, keys_last1) if m > n, and
+ * the last n - m of these elements from [keys_first2, keys_last2) if m < n.
+ *
+ * Each time a key element from [keys_first1, keys_last1) or
+ * [keys_first2, keys_last2) is copied to the keys output range, the
+ * corresponding value element is copied from the corresponding values input range (beginning at
+ * \p values_first1 or \p values_first2) to the values output range.
+ *
+ * This version of \p set_symmetric_difference_by_key compares key elements using \c operator<.
+ *
+ * \param keys_first1 The beginning of the first input range of keys.
+ * \param keys_last1 The end of the first input range of keys.
+ * \param keys_first2 The beginning of the second input range of keys.
+ * \param keys_last2 The end of the second input range of keys.
+ * \param values_first1 The beginning of the first input range of values.
+ * \param values_first2 The beginning of the second input range of values.
+ * \param keys_result The beginning of the output range of keys.
+ * \param values_result The beginning of the output range of values.
+ * \return A \p pair \c p such that p.first is the end of the output range of keys,
+ *         and such that p.second is the end of the output range of values.
+ *
+ * \tparam InputIterator1 is a model of Input Iterator,
+ *         \p InputIterator1 and \p InputIterator2 have the same \c value_type,
+ *         \p InputIterator1's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator1's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator2 is a model of Input Iterator,
+ *         \p InputIterator2 and \p InputIterator1 have the same \c value_type,
+ *         \p InputIterator2's \c value_type is a model of LessThan Comparable,
+ *         the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements,
+ *         and \p InputIterator2's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
+ * \tparam InputIterator3 is a model of Input Iterator,
+ *         and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types.
+ * \tparam InputIterator4 is a model of Input Iterator,
+ *         and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types.
+ * \tparam OutputIterator1 is a model of Output Iterator.
+ * \tparam OutputIterator2 is a model of Output Iterator.
+ *
+ * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<.
+ * \pre The resulting ranges shall not overlap with any input range.
+ *
+ * The following code snippet demonstrates how to use \p set_symmetric_difference_by_key to compute the
+ * symmetric difference of two sets of integers sorted in ascending order with their values.
+ *
+ * \code
+ * #include <thrust/set_operations.h>
+ * ...
+ * // NOTE(review): A_keys/A_vals sized to match their 7 initializers, and the expected
+ * // output recomputed: |m-n| copies of each group of equivalent keys survive, so
+ * // key 1 (one copy from B) and key 2 (one copy from A) also appear in the result.
+ * int A_keys[7] = {0, 1, 2, 2, 4, 6, 7};
+ * int A_vals[7] = {0, 0, 0, 0, 0, 0, 0};
+ *
+ * int B_keys[5] = {1, 1, 2, 5, 8};
+ * int B_vals[5] = {1, 1, 1, 1, 1};
+ *
+ * int keys_result[8];
+ * int vals_result[8];
+ *
+ * thrust::pair<int*,int*> end = thrust::set_symmetric_difference_by_key(A_keys, A_keys + 7, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result);
+ * // keys_result is now {0, 1, 2, 4, 5, 6, 7, 8}
+ * // vals_result is now {0, 1, 0, 0, 1, 0, 0, 1}
+ * \endcode
+ *
+ * \see \p set_union_by_key
+ * \see \p set_intersection_by_key
+ * \see \p set_difference_by_key
+ * \see \p sort_by_key
+ * \see \p is_sorted
+ */
+template<typename InputIterator1,
+         typename InputIterator2,
+         typename InputIterator3,
+         typename InputIterator4,
+         typename OutputIterator1,
+         typename OutputIterator2>
+  thrust::pair<OutputIterator1,OutputIterator2>
+    set_symmetric_difference_by_key(InputIterator1 keys_first1,
+                                    InputIterator1 keys_last1,
+                                    InputIterator2 keys_first2,
+                                    InputIterator2 keys_last2,
+                                    InputIterator3 values_first1,
+                                    InputIterator4 values_first2,
+                                    OutputIterator1 keys_result,
+                                    OutputIterator2 values_result);
+
+
+/*! \p set_symmetric_difference_by_key performs a key-value symmetric difference operation from set theory.
+ * \p set_symmetric_difference_by_key constructs a sorted range that is the symmetric difference of the sorted
+ * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated
+ * with each element from the input and output key ranges is a value element. The associated input
+ * value ranges need not be sorted.
+ *
+ * In the simplest case, \p set_symmetric_difference_by_key performs a set theoretic calculation:
+ * it constructs the union of the two sets A - B and B - A, where A and B are the two
+ * input ranges. That is, the output range contains a copy of every element that is
+ * contained in [keys_first1, keys_last1) but not [keys_first2, keys_last2), and a copy of
+ * every element that is contained in [keys_first2, keys_last2) but not [keys_first1, keys_last1).
+ * The general case is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if [keys_first1, keys_last1) contains \c m elements that are + * equivalent to each other and [keys_first2, keys_last1) contains \c n elements that are + * equivalent to them, then |m - n| of those elements shall be copied to the output + * range: the last m - n elements from [keys_first1, keys_last1) if m > n, and + * the last n - m of these elements from [keys_first2, keys_last2) if m < n. + * + * Each time a key element is copied from [keys_first1, keys_last1) or + * [keys_first2, keys_last2) is copied to the keys output range, the + * corresponding value element is copied from the corresponding values input range (beginning at + * \p values_first1 or \p values_first2) to the values output range. + * + * This version of \p set_symmetric_difference_by_key compares key elements using a function object \c comp. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the output range of keys. + * \param values_result The beginning of the output range of values. + * \param comp Comparison operator. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. + * + * \tparam DerivedPolicy The name of the derived execution policy. 
+ * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use \p set_symmetric_difference_by_key to compute the + * symmetric difference of two sets of integers sorted in descending order with their values using the + * \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * int A_keys[6] = {7, 6, 4, 2, 2, 1, 0}; + * int A_vals[6] = {0, 0, 0, 0, 0, 0, 0}; + * + * int B_keys[5] = {8, 5, 2, 1, 1}; + * int B_vals[5] = {1, 1, 1, 1, 1}; + * + * int keys_result[6]; + * int vals_result[6]; + * + * thrust::pair end = thrust::set_symmetric_difference_by_key(thrust::host, A_keys, A_keys + 6, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result); + * // keys_result is now {8, 7, 6, 5, 4, 0} + * // vals_result is now {1, 0, 0, 1, 0, 0} + * \endcode + * + * \see \p set_union_by_key + * \see \p set_intersection_by_key + * \see \p set_difference_by_key + * \see \p sort_by_key + * \see \p is_sorted + */ +template +__host__ __device__ + thrust::pair + set_symmetric_difference_by_key(const thrust::detail::execution_policy_base &exec, + InputIterator1 keys_first1, + InputIterator1 keys_last1, + InputIterator2 keys_first2, + InputIterator2 keys_last2, + InputIterator3 values_first1, + InputIterator4 values_first2, + OutputIterator1 keys_result, + OutputIterator2 values_result, + StrictWeakCompare comp); + + +/*! \p set_symmetric_difference_by_key performs a key-value symmetric difference operation from set theory. + * \p set_difference_by_key constructs a sorted range that is the symmetric difference of the sorted + * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated + * with each element from the input and output key ranges is a value element. The associated input + * value ranges need not be sorted. + * + * In the simplest case, \p set_symmetric_difference_by_key performs a set theoretic calculation: + * it constructs the union of the two sets A - B and B - A, where A and B are the two + * input ranges. That is, the output range contains a copy of every element that is + * contained in [keys_first1, keys_last1) but not [keys_first2, keys_last1), and a copy of + * every element that is contained in [keys_first2, keys_last2) but not [keys_first1, keys_last1). 
+ * The general case is more complicated, because the input ranges may contain duplicate elements. + * The generalization is that if [keys_first1, keys_last1) contains \c m elements that are + * equivalent to each other and [keys_first2, keys_last1) contains \c n elements that are + * equivalent to them, then |m - n| of those elements shall be copied to the output + * range: the last m - n elements from [keys_first1, keys_last1) if m > n, and + * the last n - m of these elements from [keys_first2, keys_last2) if m < n. + * + * Each time a key element is copied from [keys_first1, keys_last1) or + * [keys_first2, keys_last2) is copied to the keys output range, the + * corresponding value element is copied from the corresponding values input range (beginning at + * \p values_first1 or \p values_first2) to the values output range. + * + * This version of \p set_symmetric_difference_by_key compares key elements using a function object \c comp. + * + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the output range of keys. + * \param values_result The beginning of the output range of values. + * \param comp Comparison operator. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. 
+ * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use \p set_symmetric_difference_by_key to compute the + * symmetric difference of two sets of integers sorted in descending order with their values. + * + * \code + * #include + * #include + * ... 
+ * int A_keys[6] = {7, 6, 4, 2, 2, 1, 0}; + * int A_vals[6] = {0, 0, 0, 0, 0, 0, 0}; + * + * int B_keys[5] = {8, 5, 2, 1, 1}; + * int B_vals[5] = {1, 1, 1, 1, 1}; + * + * int keys_result[6]; + * int vals_result[6]; + * + * thrust::pair end = thrust::set_symmetric_difference_by_key(A_keys, A_keys + 6, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result); + * // keys_result is now {8, 7, 6, 5, 4, 0} + * // vals_result is now {1, 0, 0, 1, 0, 0} + * \endcode + * + * \see \p set_union_by_key + * \see \p set_intersection_by_key + * \see \p set_difference_by_key + * \see \p sort_by_key + * \see \p is_sorted + */ +template + thrust::pair + set_symmetric_difference_by_key(InputIterator1 keys_first1, + InputIterator1 keys_last1, + InputIterator2 keys_first2, + InputIterator2 keys_last2, + InputIterator3 values_first1, + InputIterator4 values_first2, + OutputIterator1 keys_result, + OutputIterator2 values_result, + StrictWeakCompare comp); + + +/*! \p set_union_by_key performs a key-value union operation from set theory. + * \p set_union_by_key constructs a sorted range that is the union of the sorted + * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated + * with each element from the input and output key ranges is a value element. The associated input + * value ranges need not be sorted. + * + * In the simplest case, \p set_union_by_key performs the "union" operation from set theory: + * the output range contains a copy of every element that is contained in + * [keys_first1, keys_last1), [keys_first2, keys_last1), or both. The general case + * is more complicated, because the input ranges may contain duplicate elements. 
+ * The generalization is that if [keys_first1, keys_last1) contains \c m elements + * that are equivalent to each other and if [keys_first2, keys_last2) contains \c n + * elements that are equivalent to them, then all \c m elements from the first + * range shall be copied to the output range, in order, and then max(n - m, 0) + * elements from the second range shall be copied to the output, in order. + * + * Each time a key element is copied from [keys_first1, keys_last1) or + * [keys_first2, keys_last2) is copied to the keys output range, the + * corresponding value element is copied from the corresponding values input range (beginning at + * \p values_first1 or \p values_first2) to the values output range. + * + * This version of \p set_union_by_key compares key elements using \c operator<. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the output range of keys. + * \param values_result The beginning of the output range of values. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. + * + * \tparam DerivedPolicy The name of the derived execution policy. 
+ * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use \p set_symmetric_difference_by_key to compute the + * symmetric difference of two sets of integers sorted in ascending order with their values using the + * \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... 
+ * int A_keys[7] = {0, 2, 4, 6, 8, 10, 12};
+ * int A_vals[7] = {0, 0, 0, 0, 0, 0, 0};
+ *
+ * int B_keys[5] = {1, 3, 5, 7, 9};
+ * int B_vals[5] = {1, 1, 1, 1, 1};
+ *
+ * int keys_result[12];
+ * int vals_result[12];
+ *
+ * thrust::pair<int*,int*> end = thrust::set_union_by_key(thrust::host, A_keys, A_keys + 7, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result);
+ * // keys_result is now {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12}
+ * // vals_result is now {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0}
+ * \endcode
+ *
+ * \see \p set_symmetric_difference_by_key
+ * \see \p set_intersection_by_key
+ * \see \p set_difference_by_key
+ * \see \p sort_by_key
+ * \see \p is_sorted
+ */
+template<typename DerivedPolicy,
+         typename InputIterator1,
+         typename InputIterator2,
+         typename InputIterator3,
+         typename InputIterator4,
+         typename OutputIterator1,
+         typename OutputIterator2>
+__host__ __device__
+  thrust::pair<OutputIterator1, OutputIterator2>
+    set_union_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                     InputIterator1 keys_first1,
+                     InputIterator1 keys_last1,
+                     InputIterator2 keys_first2,
+                     InputIterator2 keys_last2,
+                     InputIterator3 values_first1,
+                     InputIterator4 values_first2,
+                     OutputIterator1 keys_result,
+                     OutputIterator2 values_result);
+
+
+/*! \p set_union_by_key performs a key-value union operation from set theory.
+ * \p set_union_by_key constructs a sorted range that is the union of the sorted
+ * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated
+ * with each element from the input and output key ranges is a value element. The associated input
+ * value ranges need not be sorted.
+ *
+ * In the simplest case, \p set_union_by_key performs the "union" operation from set theory:
+ * the output range contains a copy of every element that is contained in
+ * [keys_first1, keys_last1), [keys_first2, keys_last2), or both. The general case
+ * is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if [keys_first1, keys_last1) contains \c m elements + * that are equivalent to each other and if [keys_first2, keys_last2) contains \c n + * elements that are equivalent to them, then all \c m elements from the first + * range shall be copied to the output range, in order, and then max(n - m, 0) + * elements from the second range shall be copied to the output, in order. + * + * Each time a key element is copied from [keys_first1, keys_last1) or + * [keys_first2, keys_last2) is copied to the keys output range, the + * corresponding value element is copied from the corresponding values input range (beginning at + * \p values_first1 or \p values_first2) to the values output range. + * + * This version of \p set_union_by_key compares key elements using \c operator<. + * + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the output range of keys. + * \param values_result The beginning of the output range of values. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. + * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. 
+ * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to operator<. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use \p set_symmetric_difference_by_key to compute the + * symmetric difference of two sets of integers sorted in ascending order with their values. + * + * \code + * #include + * ... 
+ * int A_keys[7] = {0, 2, 4, 6, 8, 10, 12};
+ * int A_vals[7] = {0, 0, 0, 0, 0, 0, 0};
+ *
+ * int B_keys[5] = {1, 3, 5, 7, 9};
+ * int B_vals[5] = {1, 1, 1, 1, 1};
+ *
+ * int keys_result[12];
+ * int vals_result[12];
+ *
+ * thrust::pair<int*,int*> end = thrust::set_union_by_key(A_keys, A_keys + 7, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result);
+ * // keys_result is now {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12}
+ * // vals_result is now {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0}
+ * \endcode
+ *
+ * \see \p set_symmetric_difference_by_key
+ * \see \p set_intersection_by_key
+ * \see \p set_difference_by_key
+ * \see \p sort_by_key
+ * \see \p is_sorted
+ */
+template<typename InputIterator1,
+         typename InputIterator2,
+         typename InputIterator3,
+         typename InputIterator4,
+         typename OutputIterator1,
+         typename OutputIterator2>
+  thrust::pair<OutputIterator1, OutputIterator2>
+    set_union_by_key(InputIterator1 keys_first1,
+                     InputIterator1 keys_last1,
+                     InputIterator2 keys_first2,
+                     InputIterator2 keys_last2,
+                     InputIterator3 values_first1,
+                     InputIterator4 values_first2,
+                     OutputIterator1 keys_result,
+                     OutputIterator2 values_result);
+
+
+/*! \p set_union_by_key performs a key-value union operation from set theory.
+ * \p set_union_by_key constructs a sorted range that is the union of the sorted
+ * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated
+ * with each element from the input and output key ranges is a value element. The associated input
+ * value ranges need not be sorted.
+ *
+ * In the simplest case, \p set_union_by_key performs the "union" operation from set theory:
+ * the output range contains a copy of every element that is contained in
+ * [keys_first1, keys_last1), [keys_first2, keys_last2), or both. The general case
+ * is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if [keys_first1, keys_last1) contains \c m elements + * that are equivalent to each other and if [keys_first2, keys_last2) contains \c n + * elements that are equivalent to them, then all \c m elements from the first + * range shall be copied to the output range, in order, and then max(n - m, 0) + * elements from the second range shall be copied to the output, in order. + * + * Each time a key element is copied from [keys_first1, keys_last1) or + * [keys_first2, keys_last2) is copied to the keys output range, the + * corresponding value element is copied from the corresponding values input range (beginning at + * \p values_first1 or \p values_first2) to the values output range. + * + * This version of \p set_union_by_key compares key elements using a function object \c comp. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the output range of keys. + * \param values_result The beginning of the output range of values. + * \param comp Comparison operator. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. + * + * \tparam DerivedPolicy The name of the derived execution policy. 
+ * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertable to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use \p set_symmetric_difference_by_key to compute the + * symmetric difference of two sets of integers sorted in descending order with their values using the + * \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * int A_keys[7] = {12, 10, 8, 6, 4, 2, 0};
+ * int A_vals[7] = { 0, 0, 0, 0, 0, 0, 0};
+ *
+ * int B_keys[5] = {9, 7, 5, 3, 1};
+ * int B_vals[5] = {1, 1, 1, 1, 1};
+ *
+ * int keys_result[12];
+ * int vals_result[12];
+ *
+ * thrust::pair<int*,int*> end = thrust::set_union_by_key(thrust::host, A_keys, A_keys + 7, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result, thrust::greater<int>());
+ * // keys_result is now {12, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}
+ * // vals_result is now { 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0}
+ * \endcode
+ *
+ * \see \p set_symmetric_difference_by_key
+ * \see \p set_intersection_by_key
+ * \see \p set_difference_by_key
+ * \see \p sort_by_key
+ * \see \p is_sorted
+ */
+template<typename DerivedPolicy,
+         typename InputIterator1,
+         typename InputIterator2,
+         typename InputIterator3,
+         typename InputIterator4,
+         typename OutputIterator1,
+         typename OutputIterator2,
+         typename StrictWeakCompare>
+__host__ __device__
+  thrust::pair<OutputIterator1, OutputIterator2>
+    set_union_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
+                     InputIterator1 keys_first1,
+                     InputIterator1 keys_last1,
+                     InputIterator2 keys_first2,
+                     InputIterator2 keys_last2,
+                     InputIterator3 values_first1,
+                     InputIterator4 values_first2,
+                     OutputIterator1 keys_result,
+                     OutputIterator2 values_result,
+                     StrictWeakCompare comp);
+
+
+/*! \p set_union_by_key performs a key-value union operation from set theory.
+ * \p set_union_by_key constructs a sorted range that is the union of the sorted
+ * ranges [keys_first1, keys_last1) and [keys_first2, keys_last2). Associated
+ * with each element from the input and output key ranges is a value element. The associated input
+ * value ranges need not be sorted.
+ *
+ * In the simplest case, \p set_union_by_key performs the "union" operation from set theory:
+ * the output range contains a copy of every element that is contained in
+ * [keys_first1, keys_last1), [keys_first2, keys_last2), or both. The general case
+ * is more complicated, because the input ranges may contain duplicate elements.
+ * The generalization is that if [keys_first1, keys_last1) contains \c m elements + * that are equivalent to each other and if [keys_first2, keys_last2) contains \c n + * elements that are equivalent to them, then all \c m elements from the first + * range shall be copied to the output range, in order, and then max(n - m, 0) + * elements from the second range shall be copied to the output, in order. + * + * Each time a key element is copied from [keys_first1, keys_last1) or + * [keys_first2, keys_last2) is copied to the keys output range, the + * corresponding value element is copied from the corresponding values input range (beginning at + * \p values_first1 or \p values_first2) to the values output range. + * + * This version of \p set_union_by_key compares key elements using a function object \c comp. + * + * \param keys_first1 The beginning of the first input range of keys. + * \param keys_last1 The end of the first input range of keys. + * \param keys_first2 The beginning of the second input range of keys. + * \param keys_last2 The end of the second input range of keys. + * \param values_first1 The beginning of the first input range of values. + * \param values_first2 The beginning of the first input range of values. + * \param keys_result The beginning of the output range of keys. + * \param values_result The beginning of the output range of values. + * \param comp Comparison operator. + * \return A \p pair \c p such that p.first is the end of the output range of keys, + * and such that p.second is the end of the output range of values. 
+ * + * \tparam InputIterator1 is a model of Input Iterator, + * \p InputIterator1 and \p InputIterator2 have the same \c value_type, + * \p InputIterator1's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator1's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator1's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator2 is a model of Input Iterator, + * \p InputIterator2 and \p InputIterator1 have the same \c value_type, + * \p InputIterator2's \c value_type is a model of LessThan Comparable, + * the ordering on \p InputIterator2's \c value_type is a strict weak ordering, as defined in the LessThan Comparable requirements, + * and \p InputIterator2's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types. + * \tparam InputIterator3 is a model of Input Iterator, + * and \p InputIterator3's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam InputIterator4 is a model of Input Iterator, + * and \p InputIterator4's \c value_type is convertible to a type in \p OutputIterator2's set of \c value_types. + * \tparam OutputIterator1 is a model of Output Iterator. + * \tparam OutputIterator2 is a model of Output Iterator. + * \tparam StrictWeakCompare is a model of Strict Weak Ordering. + * + * \pre The ranges [keys_first1, keys_last1) and [keys_first2, keys_last2) shall be sorted with respect to \p comp. + * \pre The resulting ranges shall not overlap with any input range. + * + * The following code snippet demonstrates how to use \p set_symmetric_difference_by_key to compute the + * symmetric difference of two sets of integers sorted in descending order with their values. + * + * \code + * #include + * #include + * ... 
+ * int A_keys[6] = {12, 10, 8, 6, 4, 2, 0}; + * int A_vals[6] = { 0, 0, 0, 0, 0, 0, 0}; + * + * int B_keys[5] = {9, 7, 5, 3, 1}; + * int B_vals[5] = {1, 1, 1, 1, 1}; + * + * int keys_result[11]; + * int vals_result[11]; + * + * thrust::pair end = thrust::set_symmetric_difference_by_key(A_keys, A_keys + 6, B_keys, B_keys + 5, A_vals, B_vals, keys_result, vals_result, thrust::greater()); + * // keys_result is now {12, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0} + * // vals_result is now { 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0} + * \endcode + * + * \see \p set_symmetric_difference_by_key + * \see \p set_intersection_by_key + * \see \p set_difference_by_key + * \see \p sort_by_key + * \see \p is_sorted + */ +template + thrust::pair + set_union_by_key(InputIterator1 keys_first1, + InputIterator1 keys_last1, + InputIterator2 keys_first2, + InputIterator2 keys_last2, + InputIterator3 values_first1, + InputIterator4 values_first2, + OutputIterator1 keys_result, + OutputIterator2 values_result, + StrictWeakCompare comp); + + +/*! \} // end set_operations + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/shuffle.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/shuffle.h new file mode 100644 index 0000000000000000000000000000000000000000..d95327e29ce44d5f7972ad02ff306adb85e4bf83 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/shuffle.h @@ -0,0 +1,179 @@ +/* + * Copyright 2008-2020 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file shuffle.h + * \brief Reorders range by a uniform random permutation + */ + +#pragma once + +#include +#include + +#if THRUST_CPP_DIALECT >= 2011 + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup reordering +* \ingroup algorithms +* +* \addtogroup shuffling +* \ingroup reordering +* \{ +*/ + + +/*! \p shuffle reorders the elements [first, last) by a uniform pseudorandom permutation, defined by + * random engine \p g. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence to shuffle. + * \param last The end of the sequence to shuffle. + * \param g A UniformRandomBitGenerator + * + * \tparam DerivedPolicy The name of the derived execution policy. 
+ * \tparam RandomIterator is a random access iterator + * \tparam URBG is a uniform random bit generator + * + * The following code snippet demonstrates how to use \p shuffle to create a random permutation + * using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * const int N = sizeof(A)/sizeof(int); + * thrust::default_random_engine g; + * thrust::shuffle(thrust::host, A, A + N, g); + * // A is now {6, 5, 8, 7, 2, 1, 4, 3, 10, 9} + * \endcode + * + * \see \p shuffle_copy + */ +template +__host__ __device__ void shuffle( + const thrust::detail::execution_policy_base& exec, + RandomIterator first, RandomIterator last, URBG&& g); + +/*! \p shuffle reorders the elements [first, last) by a uniform pseudorandom permutation, defined by + * random engine \p g. + * + * \param first The beginning of the sequence to shuffle. + * \param last The end of the sequence to shuffle. + * \param g A UniformRandomBitGenerator + * + * \tparam RandomIterator is a random access iterator + * \tparam URBG is a uniform random bit generator + * + * The following code snippet demonstrates how to use \p shuffle to create a random permutation. + * + * \code + * #include + * #include + * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * const int N = sizeof(A)/sizeof(int); + * thrust::default_random_engine g; + * thrust::shuffle(A, A + N, g); + * // A is now {6, 5, 8, 7, 2, 1, 4, 3, 10, 9} + * \endcode + * + * \see \p shuffle_copy + */ +template +__host__ __device__ void shuffle(RandomIterator first, RandomIterator last, + URBG&& g); + +/*! shuffle_copy differs from shuffle only in that the reordered sequence is written to different output sequences, rather than in place. + * \p shuffle_copy reorders the elements [first, last) by a uniform pseudorandom permutation, defined by + * random engine \p g. + * + * The algorithm's execution is parallelized as determined by \p exec. 
+ + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence to shuffle. + * \param last The end of the sequence to shuffle. + * \param result Destination of shuffled sequence + * \param g A UniformRandomBitGenerator + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam RandomIterator is a random access iterator + * \tparam OutputIterator is a model of Output Iterator. + * \tparam URBG is a uniform random bit generator + * + * The following code snippet demonstrates how to use \p shuffle_copy to create a random permutation. + * + * \code + * #include + * #include + * #include + * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * int result[10]; + * const int N = sizeof(A)/sizeof(int); + * thrust::default_random_engine g; + * thrust::shuffle_copy(thrust::host, A, A + N, result, g); + * // result is now {6, 5, 8, 7, 2, 1, 4, 3, 10, 9} + * \endcode + * + * \see \p shuffle + */ +template +__host__ __device__ void shuffle_copy( + const thrust::detail::execution_policy_base& exec, + RandomIterator first, RandomIterator last, OutputIterator result, URBG&& g); + +/*! shuffle_copy differs from shuffle only in that the reordered sequence is written to different output sequences, rather than in place. + *\p shuffle_copy reorders the elements [first, last) by a uniform pseudorandom permutation, defined by + * random engine \p g. + * + * \param first The beginning of the sequence to shuffle. + * \param last The end of the sequence to shuffle. + * \param result Destination of shuffled sequence + * \param g A UniformRandomBitGenerator + * + * \tparam RandomIterator is a random access iterator + * \tparam OutputIterator is a model of Output Iterator. + * \tparam URBG is a uniform random bit generator + * + * The following code snippet demonstrates how to use \p shuffle_copy to create a random permutation. 
+ * + * \code + * #include + * #include + * int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + * int result[10]; + * const int N = sizeof(A)/sizeof(int); + * thrust::default_random_engine g; + * thrust::shuffle_copy(A, A + N, result, g); + * // result is now {6, 5, 8, 7, 2, 1, 4, 3, 10, 9} + * \endcode + * + * \see \p shuffle + */ +template +__host__ __device__ void shuffle_copy(RandomIterator first, RandomIterator last, + OutputIterator result, URBG&& g); + +THRUST_NAMESPACE_END + +#include +#endif diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/swap.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/swap.h new file mode 100644 index 0000000000000000000000000000000000000000..d8a8be73c3111390cc20c6e45089a04969e73af1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/swap.h @@ -0,0 +1,184 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file swap.h + * \brief Functions for swapping the value of elements + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup utility + * \{ + */ + +/*! \addtogroup swap + * \{ + */ + +/*! \p swap assigns the contents of \c a to \c b and the + * contents of \c b to \c a. 
This is used as a primitive operation + * by many other algorithms. + * + * \param a The first value of interest. After completion, + * the value of b will be returned here. + * \param b The second value of interest. After completion, + * the value of a will be returned here. + * + * \tparam Assignable is a model of Assignable. + * + * The following code snippet demonstrates how to use \p swap to + * swap the contents of two variables. + * + * \code + * #include + * ... + * int x = 1; + * int y = 2; + * thrust::swap(x,y); + * + * // x == 2, y == 1 + * \endcode + */ +template +__host__ __device__ +inline void swap(Assignable1 &a, Assignable2 &b); + +/*! \} // swap + */ + +/*! \} // utility + */ + + +/*! \addtogroup copying + * \{ + */ + + +/*! \p swap_ranges swaps each of the elements in the range [first1, last1) + * with the corresponding element in the range [first2, first2 + (last1 - first1)). + * That is, for each integer \c n such that 0 <= n < (last1 - first1), it swaps + * *(first1 + n) and *(first2 + n). The return value is + * first2 + (last1 - first1). + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first sequence to swap. + * \param last1 One position past the last element of the first sequence to swap. + * \param first2 The beginning of the second sequence to swap. + * \return An iterator pointing to one position past the last element of the second + * sequence to swap. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator1 is a model of Forward Iterator, + * and \p ForwardIterator1's \c value_type must be convertible to \p ForwardIterator2's \c value_type. + * \tparam ForwardIterator2 is a model of Forward Iterator, + * and \p ForwardIterator2's \c value_type must be convertible to \p ForwardIterator1's \c value_type. 
+ * + * \pre \p first1 may equal \p first2, but the range [first1, last1) shall not overlap the range [first2, first2 + (last1 - first1)) otherwise. + * + * The following code snippet demonstrates how to use \p swap_ranges to + * swap the contents of two \c thrust::device_vectors using the \p thrust::device execution + * policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * thrust::device_vector v1(2), v2(2); + * v1[0] = 1; + * v1[1] = 2; + * v2[0] = 3; + * v2[1] = 4; + * + * thrust::swap_ranges(thrust::device, v1.begin(), v1.end(), v2.begin()); + * + * // v1[0] == 3, v1[1] == 4, v2[0] == 1, v2[1] == 2 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/swap_ranges + * \see \c swap + */ +template +__host__ __device__ + ForwardIterator2 swap_ranges(const thrust::detail::execution_policy_base &exec, + ForwardIterator1 first1, + ForwardIterator1 last1, + ForwardIterator2 first2); + + +/*! \p swap_ranges swaps each of the elements in the range [first1, last1) + * with the corresponding element in the range [first2, first2 + (last1 - first1)). + * That is, for each integer \c n such that 0 <= n < (last1 - first1), it swaps + * *(first1 + n) and *(first2 + n). The return value is + * first2 + (last1 - first1). + * + * \param first1 The beginning of the first sequence to swap. + * \param last1 One position past the last element of the first sequence to swap. + * \param first2 The beginning of the second sequence to swap. + * \return An iterator pointing to one position past the last element of the second + * sequence to swap. + * + * \tparam ForwardIterator1 is a model of Forward Iterator, + * and \p ForwardIterator1's \c value_type must be convertible to \p ForwardIterator2's \c value_type. + * \tparam ForwardIterator2 is a model of Forward Iterator, + * and \p ForwardIterator2's \c value_type must be convertible to \p ForwardIterator1's \c value_type. 
+ * + * \pre \p first1 may equal \p first2, but the range [first1, last1) shall not overlap the range [first2, first2 + (last1 - first1)) otherwise. + * + * The following code snippet demonstrates how to use \p swap_ranges to + * swap the contents of two \c thrust::device_vectors. + * + * \code + * #include + * #include + * ... + * thrust::device_vector v1(2), v2(2); + * v1[0] = 1; + * v1[1] = 2; + * v2[0] = 3; + * v2[1] = 4; + * + * thrust::swap_ranges(v1.begin(), v1.end(), v2.begin()); + * + * // v1[0] == 3, v1[1] == 4, v2[0] == 1, v2[1] == 2 + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/swap_ranges + * \see \c swap + */ +template + ForwardIterator2 swap_ranges(ForwardIterator1 first1, + ForwardIterator1 last1, + ForwardIterator2 first2); + + +/*! \} // copying + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/system_error.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/system_error.h new file mode 100644 index 0000000000000000000000000000000000000000..6bf240e512342ada0226521f49dc07ca49d5a104 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/system_error.h @@ -0,0 +1,49 @@ +/* + * Copyright 2008-2021 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/*! \file thrust/system_error.h + * \brief System diagnostics + */ + +#pragma once + +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup system + * \{ + */ + +/*! \namespace thrust::system + * \brief \p thrust::system is the namespace which contains specific Thrust + * backend systems. It also contains functionality for reporting error + * conditions originating from the operating system or other low-level + * application program interfaces such as the CUDA runtime. They are + * provided in a separate namespace for import convenience but are + * also aliased in the top-level \p thrust namespace for easy access. + */ +namespace system +{ +} // end system + +/*! \} // end system + */ + +THRUST_NAMESPACE_END + +#include +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/transform.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/transform.h new file mode 100644 index 0000000000000000000000000000000000000000..2d064c13b3e4dcf179f186f9d6e4237110065bb1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/transform.h @@ -0,0 +1,721 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! 
\file thrust/transform.h + * \brief Transforms input ranges using a function object + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup algorithms + */ + +/*! \addtogroup transformations + * \ingroup algorithms + * \{ + */ + + +/*! This version of \p transform applies a unary function to each element + * of an input sequence and stores the result in the corresponding + * position in an output sequence. Specifically, for each iterator + * i in the range [\p first, \p last) the operation + * op(*i) is performed and the result is assigned to *o, + * where o is the corresponding output iterator in the range + * [\p result, \p result + (\p last - \p first) ). The input and + * output sequences may coincide, resulting in an in-place transformation. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param op The transformation operation. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to \c UnaryFunction's \c argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam UnaryFunction is a model of Unary Function + * and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the range [first, last) shall not overlap the range [result, result + (last - first)) otherwise. + * + * The following code snippet demonstrates how to use \p transform to negate a range in-place + * using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * + * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; + * + * thrust::negate op; + * + * thrust::transform(thrust::host, data, data + 10, data, op); // in-place transformation + * + * // data is now {5, 0, -2, 3, -2, -4, 0, 1, -2, -8}; + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/transform + */ +template +__host__ __device__ + OutputIterator transform(const thrust::detail::execution_policy_base &exec, + InputIterator first, InputIterator last, + OutputIterator result, + UnaryFunction op); + + +/*! This version of \p transform applies a unary function to each element + * of an input sequence and stores the result in the corresponding + * position in an output sequence. Specifically, for each iterator + * i in the range [\p first, \p last) the operation + * op(*i) is performed and the result is assigned to *o, + * where o is the corresponding output iterator in the range + * [\p result, \p result + (\p last - \p first) ). The input and + * output sequences may coincide, resulting in an in-place transformation. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param op The tranformation operation. + * \return The end of the output sequence. + * + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to \c UnaryFunction's \c argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam UnaryFunction is a model of Unary Function + * and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the range [first, last) shall not overlap the range [result, result + (last - first)) otherwise. 
+ * + * The following code snippet demonstrates how to use \p transform + * + * \code + * #include + * #include + * + * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; + * + * thrust::negate op; + * + * thrust::transform(data, data + 10, data, op); // in-place transformation + * + * // data is now {5, 0, -2, 3, -2, -4, 0, 1, -2, -8}; + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/transform + */ +template + OutputIterator transform(InputIterator first, InputIterator last, + OutputIterator result, + UnaryFunction op); + + +/*! This version of \p transform applies a binary function to each pair + * of elements from two input sequences and stores the result in the + * corresponding position in an output sequence. Specifically, for + * each iterator i in the range [\p first1, \p last1) and + * j = first + (i - first1) in the range [\p first2, \p last2) + * the operation op(*i,*j) is performed and the result is + * assigned to *o, where o is the corresponding + * output iterator in the range [\p result, \p result + (\p last - \p first) ). + * The input and output sequences may coincide, resulting in an + * in-place transformation. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input sequence. + * \param last1 The end of the first input sequence. + * \param first2 The beginning of the second input sequence. + * \param result The beginning of the output sequence. + * \param op The tranformation operation. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator + * and \c InputIterator1's \c value_type is convertible to \c BinaryFunction's \c first_argument_type. 
+ * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c BinaryFunction's \c second_argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam BinaryFunction is a model of Binary Function + * and \c BinaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type. + * + * \pre \p first1 may equal \p result, but the range [first1, last1) shall not overlap the range [result, result + (last1 - first1)) otherwise. + * \pre \p first2 may equal \p result, but the range [first2, first2 + (last1 - first1)) shall not overlap the range [result, result + (last1 - first1)) otherwise. + * + * The following code snippet demonstrates how to use \p transform to compute the sum of two + * ranges using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * + * int input1[6] = {-5, 0, 2, 3, 2, 4}; + * int input2[6] = { 3, 6, -2, 1, 2, 3}; + * int output[6]; + * + * thrust::plus op; + * + * thrust::transform(thrust::host, input1, input1 + 6, input2, output, op); + * + * // output is now {-2, 6, 0, 4, 4, 7}; + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/transform + */ +template +__host__ __device__ + OutputIterator transform(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + BinaryFunction op); + + +/*! This version of \p transform applies a binary function to each pair + * of elements from two input sequences and stores the result in the + * corresponding position in an output sequence. 
Specifically, for + * each iterator i in the range [\p first1, \p last1) and + * j = first + (i - first1) in the range [\p first2, \p last2) + * the operation op(*i,*j) is performed and the result is + * assigned to *o, where o is the corresponding + * output iterator in the range [\p result, \p result + (\p last - \p first) ). + * The input and output sequences may coincide, resulting in an + * in-place transformation. + * + * \param first1 The beginning of the first input sequence. + * \param last1 The end of the first input sequence. + * \param first2 The beginning of the second input sequence. + * \param result The beginning of the output sequence. + * \param op The tranformation operation. + * \return The end of the output sequence. + * + * \tparam InputIterator1 is a model of Input Iterator + * and \c InputIterator1's \c value_type is convertible to \c BinaryFunction's \c first_argument_type. + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c BinaryFunction's \c second_argument_type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam BinaryFunction is a model of Binary Function + * and \c BinaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type. + * + * \pre \p first1 may equal \p result, but the range [first1, last1) shall not overlap the range [result, result + (last1 - first1)) otherwise. + * \pre \p first2 may equal \p result, but the range [first2, first2 + (last1 - first1)) shall not overlap the range [result, result + (last1 - first1)) otherwise. 
+ * + * The following code snippet demonstrates how to use \p transform + * + * \code + * #include + * #include + * + * int input1[6] = {-5, 0, 2, 3, 2, 4}; + * int input2[6] = { 3, 6, -2, 1, 2, 3}; + * int output[6]; + * + * thrust::plus op; + * + * thrust::transform(input1, input1 + 6, input2, output, op); + * + * // output is now {-2, 6, 0, 4, 4, 7}; + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/transform + */ +template + OutputIterator transform(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, + OutputIterator result, + BinaryFunction op); + + +/*! This version of \p transform_if conditionally applies a unary function + * to each element of an input sequence and stores the result in the corresponding + * position in an output sequence if the corresponding position in the input sequence + * satisfies a predicate. Otherwise, the corresponding position in the + * output sequence is not modified. + * + * Specifically, for each iterator i in the range [first, last) the + * predicate pred(*i) is evaluated. If this predicate + * evaluates to \c true, the result of op(*i) is assigned to *o, + * where o is the corresponding output iterator in the range + * [result, result + (last - first) ). Otherwise, op(*i) is + * not evaluated and no assignment occurs. The input and output sequences may coincide, + * resulting in an in-place transformation. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param op The transformation operation. + * \param pred The predicate operation. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. 
+ * \tparam InputIterator is a model of Input Iterator, + * and \c InputIterator's \c value_type is convertible to \c Predicate's \c argument_type, + * and \c InputIterator's \c value_type is convertible to \c UnaryFunction's \c argument_type. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam UnaryFunction is a model of Unary Function + * and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type. + * \tparam Predicate is a model of Predicate. + * + * \pre \p first may equal \p result, but the range [first, last) shall not overlap the range [result, result + (last - first)) otherwise. + * + * The following code snippet demonstrates how to use \p transform_if to negate the odd-valued + * elements of a range using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * + * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; + * + * struct is_odd + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return x % 2; + * } + * }; + * + * thrust::negate op; + * thrust::identity identity; + * + * // negate odd elements + * thrust::transform_if(thrust::host, data, data + 10, data, op, is_odd()); // in-place transformation + * + * // data is now {5, 0, 2, 3, 2, 4, 0, 1, 2, 8}; + * \endcode + * + * \see thrust::transform + */ +template +__host__ __device__ + ForwardIterator transform_if(const thrust::detail::execution_policy_base &exec, + InputIterator first, InputIterator last, + ForwardIterator result, + UnaryFunction op, + Predicate pred); + + +/*! This version of \p transform_if conditionally applies a unary function + * to each element of an input sequence and stores the result in the corresponding + * position in an output sequence if the corresponding position in the input sequence + * satifies a predicate. Otherwise, the corresponding position in the + * output sequence is not modified. 
+ * + * Specifically, for each iterator i in the range [first, last) the + * predicate pred(*i) is evaluated. If this predicate + * evaluates to \c true, the result of op(*i) is assigned to *o, + * where o is the corresponding output iterator in the range + * [result, result + (last - first) ). Otherwise, op(*i) is + * not evaluated and no assignment occurs. The input and output sequences may coincide, + * resulting in an in-place transformation. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param op The tranformation operation. + * \param pred The predicate operation. + * \return The end of the output sequence. + * + * \tparam InputIterator is a model of Input Iterator, + * and \c InputIterator's \c value_type is convertible to \c Predicate's \c argument_type, + * and \c InputIterator's \c value_type is convertible to \c UnaryFunction's \c argument_type. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam UnaryFunction is a model of Unary Function + * and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type. + * \tparam Predicate is a model of Predicate. + * + * \pre \p first may equal \p result, but the range [first, last) shall not overlap the range [result, result + (last - first)) otherwise. 
+ * + * The following code snippet demonstrates how to use \p transform_if: + * + * \code + * #include + * #include + * + * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; + * + * struct is_odd + * { + * __host__ __device__ + * bool operator()(int x) + * { + * return x % 2; + * } + * }; + * + * thrust::negate op; + * thrust::identity identity; + * + * // negate odd elements + * thrust::transform_if(data, data + 10, data, op, is_odd()); // in-place transformation + * + * // data is now {5, 0, 2, 3, 2, 4, 0, 1, 2, 8}; + * \endcode + * + * \see thrust::transform + */ +template + ForwardIterator transform_if(InputIterator first, InputIterator last, + ForwardIterator result, + UnaryFunction op, + Predicate pred); + + +/*! This version of \p transform_if conditionally applies a unary function + * to each element of an input sequence and stores the result in the corresponding + * position in an output sequence if the corresponding position in a stencil sequence + * satisfies a predicate. Otherwise, the corresponding position in the + * output sequence is not modified. + * + * Specifically, for each iterator i in the range [first, last) the + * predicate pred(*s) is evaluated, where s is the corresponding input + * iterator in the range [stencil, stencil + (last - first) ). If this predicate + * evaluates to \c true, the result of op(*i) is assigned to *o, + * where o is the corresponding output iterator in the range + * [result, result + (last - first) ). Otherwise, op(*i) is + * not evaluated and no assignment occurs. The input and output sequences may coincide, + * resulting in an in-place transformation. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param stencil The beginning of the stencil sequence. + * \param result The beginning of the output sequence. 
+ * \param op The tranformation operation. + * \param pred The predicate operation. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator + * and \c InputIterator1's \c value_type is convertible to \c UnaryFunction's \c argument_type. + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c Predicate's \c argument_type. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam UnaryFunction is a model of Unary Function + * and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type. + * \tparam Predicate is a model of Predicate. + * + * \pre \p first may equal \p result, but the range [first, last) shall not overlap the range [result, result + (last - first)) otherwise. + * \pre \p stencil may equal \p result, but the range [stencil, stencil + (last - first)) shall not overlap the range [result, result + (last - first)) otherwise. + * + * The following code snippet demonstrates how to use \p transform_if using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... + * + * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; + * int stencil[10] = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0}; + * + * thrust::negate op; + * thrust::identity identity; + * + * thrust::transform_if(thrust::host, data, data + 10, stencil, data, op, identity); // in-place transformation + * + * // data is now {5, 0, -2, -3, -2, 4, 0, -1, -2, 8}; + * \endcode + * + * \see thrust::transform + */ +template +__host__ __device__ + ForwardIterator transform_if(const thrust::detail::execution_policy_base &exec, + InputIterator1 first, InputIterator1 last, + InputIterator2 stencil, + ForwardIterator result, + UnaryFunction op, + Predicate pred); + + +/*! 
This version of \p transform_if conditionally applies a unary function + * to each element of an input sequence and stores the result in the corresponding + * position in an output sequence if the corresponding position in a stencil sequence + * satisfies a predicate. Otherwise, the corresponding position in the + * output sequence is not modified. + * + * Specifically, for each iterator i in the range [first, last) the + * predicate pred(*s) is evaluated, where s is the corresponding input + * iterator in the range [stencil, stencil + (last - first) ). If this predicate + * evaluates to \c true, the result of op(*i) is assigned to *o, + * where o is the corresponding output iterator in the range + * [result, result + (last - first) ). Otherwise, op(*i) is + * not evaluated and no assignment occurs. The input and output sequences may coincide, + * resulting in an in-place transformation. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param stencil The beginning of the stencil sequence. + * \param result The beginning of the output sequence. + * \param op The tranformation operation. + * \param pred The predicate operation. + * \return The end of the output sequence. + * + * \tparam InputIterator1 is a model of Input Iterator + * and \c InputIterator1's \c value_type is convertible to \c UnaryFunction's \c argument_type. + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c Predicate's \c argument_type. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam UnaryFunction is a model of Unary Function + * and \c UnaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type. + * \tparam Predicate is a model of Predicate. + * + * \pre \p first may equal \p result, but the range [first, last) shall not overlap the range [result, result + (last - first)) otherwise. 
+ * \pre \p stencil may equal \p result, but the range [stencil, stencil + (last - first)) shall not overlap the range [result, result + (last - first)) otherwise. + * + * The following code snippet demonstrates how to use \p transform_if: + * + * \code + * #include + * #include + * + * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; + * int stencil[10] = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0}; + * + * thrust::negate op; + * thrust::identity identity; + * + * thrust::transform_if(data, data + 10, stencil, data, op, identity); // in-place transformation + * + * // data is now {5, 0, -2, -3, -2, 4, 0, -1, -2, 8}; + * \endcode + * + * \see thrust::transform + */ +template + ForwardIterator transform_if(InputIterator1 first, InputIterator1 last, + InputIterator2 stencil, + ForwardIterator result, + UnaryFunction op, + Predicate pred); + + +/*! This version of \p transform_if conditionally applies a binary function + * to each pair of elements from two input sequences and stores the result in the corresponding + * position in an output sequence if the corresponding position in a stencil sequence + * satifies a predicate. Otherwise, the corresponding position in the + * output sequence is not modified. + * + * Specifically, for each iterator i in the range [first1, last1) and + * j = first2 + (i - first1) in the range [first2, first2 + (last1 - first1) ), + * the predicate pred(*s) is evaluated, where s is the corresponding input + * iterator in the range [stencil, stencil + (last1 - first1) ). If this predicate + * evaluates to \c true, the result of binary_op(*i,*j) is assigned to *o, + * where o is the corresponding output iterator in the range + * [result, result + (last1 - first1) ). Otherwise, binary_op(*i,*j) is + * not evaluated and no assignment occurs. The input and output sequences may coincide, + * resulting in an in-place transformation. + * + * The algorithm's execution is parallelized as determined by \p exec. 
+ * + * \param exec The execution policy to use for parallelization. + * \param first1 The beginning of the first input sequence. + * \param last1 The end of the first input sequence. + * \param first2 The beginning of the second input sequence. + * \param stencil The beginning of the stencil sequence. + * \param result The beginning of the output sequence. + * \param binary_op The transformation operation. + * \param pred The predicate operation. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator + * and \c InputIterator1's \c value_type is convertible to \c BinaryFunction's \c first_argument_type. + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c BinaryFunction's \c second_argument_type. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam BinaryFunction is a model of Binary Function + * and \c BinaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type. + * \tparam Predicate is a model of Predicate. + * + * \pre \p first1 may equal \p result, but the range [first1, last1) shall not overlap the range [result, result + (last1 - first1)) otherwise. + * \pre \p first2 may equal \p result, but the range [first2, first2 + (last1 - first1)) shall not overlap the range [result, result + (last1 - first1)) otherwise. + * \pre \p stencil may equal \p result, but the range [stencil, stencil + (last1 - first1)) shall not overlap the range [result, result + (last1 - first1)) otherwise. + * + * The following code snippet demonstrates how to use \p transform_if using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * ... 
+ * + * int input1[6] = {-5, 0, 2, 3, 2, 4}; + * int input2[6] = { 3, 6, -2, 1, 2, 3}; + * int stencil[8] = { 1, 0, 1, 0, 1, 0}; + * int output[6]; + * + * thrust::plus op; + * thrust::identity identity; + * + * thrust::transform_if(thrust::host, input1, input1 + 6, input2, stencil, output, op, identity); + * + * // output is now {-2, 0, 0, 3, 4, 4}; + * \endcode + * + * \see thrust::transform + */ +template +__host__ __device__ + ForwardIterator transform_if(const thrust::detail::execution_policy_base &exec, + InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, + InputIterator3 stencil, + ForwardIterator result, + BinaryFunction binary_op, + Predicate pred); + + +/*! This version of \p transform_if conditionally applies a binary function + * to each pair of elements from two input sequences and stores the result in the corresponding + * position in an output sequence if the corresponding position in a stencil sequence + * satifies a predicate. Otherwise, the corresponding position in the + * output sequence is not modified. + * + * Specifically, for each iterator i in the range [first1, last1) and + * j = first2 + (i - first1) in the range [first2, first2 + (last1 - first1) ), + * the predicate pred(*s) is evaluated, where s is the corresponding input + * iterator in the range [stencil, stencil + (last1 - first1) ). If this predicate + * evaluates to \c true, the result of binary_op(*i,*j) is assigned to *o, + * where o is the corresponding output iterator in the range + * [result, result + (last1 - first1) ). Otherwise, binary_op(*i,*j) is + * not evaluated and no assignment occurs. The input and output sequences may coincide, + * resulting in an in-place transformation. + * + * \param first1 The beginning of the first input sequence. + * \param last1 The end of the first input sequence. + * \param first2 The beginning of the second input sequence. + * \param stencil The beginning of the stencil sequence. 
+ * \param result The beginning of the output sequence. + * \param binary_op The transformation operation. + * \param pred The predicate operation. + * \return The end of the output sequence. + * + * \tparam InputIterator1 is a model of Input Iterator + * and \c InputIterator1's \c value_type is convertible to \c BinaryFunction's \c first_argument_type. + * \tparam InputIterator2 is a model of Input Iterator + * and \c InputIterator2's \c value_type is convertible to \c BinaryFunction's \c second_argument_type. + * \tparam ForwardIterator is a model of Forward Iterator. + * \tparam BinaryFunction is a model of Binary Function + * and \c BinaryFunction's \c result_type is convertible to \c OutputIterator's \c value_type. + * \tparam Predicate is a model of Predicate. + * + * \pre \p first1 may equal \p result, but the range [first1, last1) shall not overlap the range [result, result + (last1 - first1)) otherwise. + * \pre \p first2 may equal \p result, but the range [first2, first2 + (last1 - first1)) shall not overlap the range [result, result + (last1 - first1)) otherwise. + * \pre \p stencil may equal \p result, but the range [stencil, stencil + (last1 - first1)) shall not overlap the range [result, result + (last1 - first1)) otherwise. + * + * The following code snippet demonstrates how to use \p transform_if: + * + * \code + * #include + * #include + * + * int input1[6] = {-5, 0, 2, 3, 2, 4}; + * int input2[6] = { 3, 6, -2, 1, 2, 3}; + * int stencil[8] = { 1, 0, 1, 0, 1, 0}; + * int output[6]; + * + * thrust::plus op; + * thrust::identity identity; + * + * thrust::transform_if(input1, input1 + 6, input2, stencil, output, op, identity); + * + * // output is now {-2, 0, 0, 3, 4, 4}; + * \endcode + * + * \see thrust::transform + */ +template + ForwardIterator transform_if(InputIterator1 first1, InputIterator1 last1, + InputIterator2 first2, + InputIterator3 stencil, + ForwardIterator result, + BinaryFunction binary_op, + Predicate pred); + + +/*! 
\} // end transformations + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/transform_reduce.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/transform_reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..11d6b84c3eb12a6ae6310d3cedc0b5a5a2e31028 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/transform_reduce.h @@ -0,0 +1,194 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file transform_reduce.h + * \brief Fused transform / reduction + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup reductions + * \{ + * \addtogroup transformed_reductions Transformed Reductions + * \ingroup reductions + * \{ + */ + + +/*! \p transform_reduce fuses the \p transform and \p reduce operations. + * \p transform_reduce is equivalent to performing a transformation defined by + * \p unary_op into a temporary sequence and then performing \p reduce on the + * transformed sequence. In most cases, fusing these two operations together is + * more efficient, since fewer memory reads and writes are required. 
+ * + * \p transform_reduce performs a reduction on the transformation of the + * sequence [first, last) according to \p unary_op. Specifically, + * \p unary_op is applied to each element of the sequence and then the result + * is reduced to a single value with \p binary_op using the initial value + * \p init. Note that the transformation \p unary_op is not applied to + * the initial value \p init. The order of reduction is not specified, + * so \p binary_op must be both commutative and associative. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the sequence. + * \param last The end of the sequence. + * \param unary_op The function to apply to each element of the input sequence. + * \param init The result is initialized to this value. + * \param binary_op The reduction operation. + * \return The result of the transformed reduction. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type. + * \tparam UnaryFunction is a model of Unary Function, + * and \p UnaryFunction's \c result_type is convertible to \c OutputType. + * \tparam OutputType is a model of Assignable, + * and is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type. + * \tparam BinaryFunction is a model of Binary Function, + * and \p BinaryFunction's \c result_type is convertible to \p OutputType. 
+ * + * The following code snippet demonstrates how to use \p transform_reduce + * to compute the maximum value of the absolute value of the elements + * of a range using the \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * #include + * + * template + * struct absolute_value : public unary_function + * { + * __host__ __device__ T operator()(const T &x) const + * { + * return x < T(0) ? -x : x; + * } + * }; + * + * ... + * + * int data[6] = {-1, 0, -2, -2, 1, -3}; + * int result = thrust::transform_reduce(thrust::host, + * data, data + 6, + * absolute_value(), + * 0, + * thrust::maximum()); + * // result == 3 + * \endcode + * + * \see \c transform + * \see \c reduce + */ +template +__host__ __device__ + OutputType transform_reduce(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + UnaryFunction unary_op, + OutputType init, + BinaryFunction binary_op); + + +/*! \p transform_reduce fuses the \p transform and \p reduce operations. + * \p transform_reduce is equivalent to performing a transformation defined by + * \p unary_op into a temporary sequence and then performing \p reduce on the + * transformed sequence. In most cases, fusing these two operations together is + * more efficient, since fewer memory reads and writes are required. + * + * \p transform_reduce performs a reduction on the transformation of the + * sequence [first, last) according to \p unary_op. Specifically, + * \p unary_op is applied to each element of the sequence and then the result + * is reduced to a single value with \p binary_op using the initial value + * \p init. Note that the transformation \p unary_op is not applied to + * the initial value \p init. The order of reduction is not specified, + * so \p binary_op must be both commutative and associative. + * + * \param first The beginning of the sequence. + * \param last The end of the sequence. 
+ * \param unary_op The function to apply to each element of the input sequence. + * \param init The result is initialized to this value. + * \param binary_op The reduction operation. + * \return The result of the transformed reduction. + * + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type. + * \tparam UnaryFunction is a model of Unary Function, + * and \p UnaryFunction's \c result_type is convertible to \c OutputType. + * \tparam OutputType is a model of Assignable, + * and is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type. + * \tparam BinaryFunction is a model of Binary Function, + * and \p BinaryFunction's \c result_type is convertible to \p OutputType. + * + * The following code snippet demonstrates how to use \p transform_reduce + * to compute the maximum value of the absolute value of the elements + * of a range. + * + * \code + * #include + * #include + * + * template + * struct absolute_value : public unary_function + * { + * __host__ __device__ T operator()(const T &x) const + * { + * return x < T(0) ? -x : x; + * } + * }; + * + * ... + * + * int data[6] = {-1, 0, -2, -2, 1, -3}; + * int result = thrust::transform_reduce(data, data + 6, + * absolute_value(), + * 0, + * thrust::maximum()); + * // result == 3 + * \endcode + * + * \see \c transform + * \see \c reduce + */ +template + OutputType transform_reduce(InputIterator first, + InputIterator last, + UnaryFunction unary_op, + OutputType init, + BinaryFunction binary_op); + + +/*! 
\} // end transformed_reductions + * \} // end reductions + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/transform_scan.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/transform_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..6c0fe81166806eb3c0d8e0557fcdc36505a30062 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/transform_scan.h @@ -0,0 +1,320 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file transform_scan.h + * \brief Fused transform / prefix-sum + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup algorithms + */ + +/*! \addtogroup prefixsums Prefix Sums + * \ingroup algorithms + * \{ + */ + +/*! \addtogroup transformed_prefixsums Transformed Prefix Sums + * \ingroup prefixsums + * \{ + */ + + +/*! \p transform_inclusive_scan fuses the \p transform and \p inclusive_scan + * operations. \p transform_inclusive_scan is equivalent to performing a + * tranformation defined by \p unary_op into a temporary sequence and then + * performing an \p inclusive_scan on the tranformed sequence. 
In most + * cases, fusing these two operations together is more efficient, since + * fewer memory reads and writes are required. In \p transform_inclusive_scan, + * unary_op(\*first) is assigned to \*result and the result + * of binary_op(unary_op(\*first), unary_op(\*(first + 1))) is + * assigned to \*(result + 1), and so on. The transform scan + * operation is permitted to be in-place. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param unary_op The function used to tranform the input sequence. + * \param binary_op The associatve operator used to 'sum' transformed values. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to \c unary_op's input type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam UnaryFunction is a model of Unary Function + * and accepts inputs of \c InputIterator's \c value_type. \c UnaryFunction's result_type + * is convertable to \c OutputIterator's \c value_type. + * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is + * convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p transform_inclusive_scan using the + * \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... 
+ * + * int data[6] = {1, 0, 2, 2, 1, 3}; + * + * thrust::negate unary_op; + * thrust::plus binary_op; + * + * thrust::transform_inclusive_scan(thrust::host, data, data + 6, data, unary_op, binary_op); // in-place scan + * + * // data is now {-1, -1, -3, -5, -6, -9} + * \endcode + * + * \see \p transform + * \see \p inclusive_scan + * + */ +template +__host__ __device__ + OutputIterator transform_inclusive_scan(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator result, + UnaryFunction unary_op, + AssociativeOperator binary_op); + + +/*! \p transform_inclusive_scan fuses the \p transform and \p inclusive_scan + * operations. \p transform_inclusive_scan is equivalent to performing a + * tranformation defined by \p unary_op into a temporary sequence and then + * performing an \p inclusive_scan on the tranformed sequence. In most + * cases, fusing these two operations together is more efficient, since + * fewer memory reads and writes are required. In \p transform_inclusive_scan, + * unary_op(\*first) is assigned to \*result and the result + * of binary_op(unary_op(\*first), unary_op(\*(first + 1))) is + * assigned to \*(result + 1), and so on. The transform scan + * operation is permitted to be in-place. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param unary_op The function used to tranform the input sequence. + * \param binary_op The associatve operator used to 'sum' transformed values. + * \return The end of the output sequence. + * + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to \c unary_op's input type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam UnaryFunction is a model of Unary Function + * and accepts inputs of \c InputIterator's \c value_type. 
\c UnaryFunction's result_type + * is convertable to \c OutputIterator's \c value_type. + * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is + * convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p transform_inclusive_scan + * + * \code + * #include + * + * int data[6] = {1, 0, 2, 2, 1, 3}; + * + * thrust::negate unary_op; + * thrust::plus binary_op; + * + * thrust::transform_inclusive_scan(data, data + 6, data, unary_op, binary_op); // in-place scan + * + * // data is now {-1, -1, -3, -5, -6, -9} + * \endcode + * + * \see \p transform + * \see \p inclusive_scan + * + */ +template + OutputIterator transform_inclusive_scan(InputIterator first, + InputIterator last, + OutputIterator result, + UnaryFunction unary_op, + AssociativeOperator binary_op); + + +/*! \p transform_exclusive_scan fuses the \p transform and \p exclusive_scan + * operations. \p transform_exclusive_scan is equivalent to performing a + * tranformation defined by \p unary_op into a temporary sequence and then + * performing an \p exclusive_scan on the tranformed sequence. In most + * cases, fusing these two operations together is more efficient, since + * fewer memory reads and writes are required. In + * \p transform_exclusive_scan, \p init is assigned to \*result + * and the result of binary_op(init, unary_op(\*first)) is assigned + * to \*(result + 1), and so on. The transform scan operation is + * permitted to be in-place. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. 
+ * \param unary_op The function used to tranform the input sequence. + * \param init The initial value of the \p exclusive_scan + * \param binary_op The associatve operator used to 'sum' transformed values. + * \return The end of the output sequence. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to \c unary_op's input type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam UnaryFunction is a model of Unary Function + * and accepts inputs of \c InputIterator's \c value_type. \c UnaryFunction's result_type + * is convertable to \c OutputIterator's \c value_type. + * \tparam T is convertible to \c OutputIterator's \c value_type. + * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is + * convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p transform_exclusive_scan using the + * \p thrust::host execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * + * int data[6] = {1, 0, 2, 2, 1, 3}; + * + * thrust::negate unary_op; + * thrust::plus binary_op; + * + * thrust::transform_exclusive_scan(thrust::host, data, data + 6, data, unary_op, 4, binary_op); // in-place scan + * + * // data is now {4, 3, 3, 1, -1, -2} + * \endcode + * + * \see \p transform + * \see \p exclusive_scan + * + */ +template +__host__ __device__ + OutputIterator transform_exclusive_scan(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator result, + UnaryFunction unary_op, + T init, + AssociativeOperator binary_op); + + +/*! 
\p transform_exclusive_scan fuses the \p transform and \p exclusive_scan + * operations. \p transform_exclusive_scan is equivalent to performing a + * tranformation defined by \p unary_op into a temporary sequence and then + * performing an \p exclusive_scan on the tranformed sequence. In most + * cases, fusing these two operations together is more efficient, since + * fewer memory reads and writes are required. In + * \p transform_exclusive_scan, \p init is assigned to \*result + * and the result of binary_op(init, unary_op(\*first)) is assigned + * to \*(result + 1), and so on. The transform scan operation is + * permitted to be in-place. + * + * \param first The beginning of the input sequence. + * \param last The end of the input sequence. + * \param result The beginning of the output sequence. + * \param unary_op The function used to tranform the input sequence. + * \param init The initial value of the \p exclusive_scan + * \param binary_op The associatve operator used to 'sum' transformed values. + * \return The end of the output sequence. + * + * \tparam InputIterator is a model of Input Iterator + * and \c InputIterator's \c value_type is convertible to \c unary_op's input type. + * \tparam OutputIterator is a model of Output Iterator. + * \tparam UnaryFunction is a model of Unary Function + * and accepts inputs of \c InputIterator's \c value_type. \c UnaryFunction's result_type + * is convertable to \c OutputIterator's \c value_type. + * \tparam T is convertible to \c OutputIterator's \c value_type. + * \tparam AssociativeOperator is a model of Binary Function + * and \c AssociativeOperator's \c result_type is + * convertible to \c OutputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. 
+ * + * The following code snippet demonstrates how to use \p transform_exclusive_scan + * + * \code + * #include + * + * int data[6] = {1, 0, 2, 2, 1, 3}; + * + * thrust::negate unary_op; + * thrust::plus binary_op; + * + * thrust::transform_exclusive_scan(data, data + 6, data, unary_op, 4, binary_op); // in-place scan + * + * // data is now {4, 3, 3, 1, -1, -2} + * \endcode + * + * \see \p transform + * \see \p exclusive_scan + * + */ +template + OutputIterator transform_exclusive_scan(InputIterator first, + InputIterator last, + OutputIterator result, + UnaryFunction unary_op, + T init, + AssociativeOperator binary_op); + + +/*! \} // end transformed_prefixsums + */ + + +/*! \} // end prefixsums + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/tuple.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/tuple.h new file mode 100644 index 0000000000000000000000000000000000000000..04f3154a3b3b09cc8bd29f81bdf6b5ebc76ac883 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/tuple.h @@ -0,0 +1,575 @@ +/* + * Copyright 2008-2018 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file tuple.h + * \brief A type encapsulating a heterogeneous collection of elements. 
+ */ + +/* + * Copyright (C) 1999, 2000 Jaakko Järvi (jaakko.jarvi@cs.utu.fi) + * + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying NOTICE file for the complete license) + * + * For more information, see http://www.boost.org + */ + +#pragma once + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup utility + * \{ + */ + +/*! \addtogroup tuple + * \{ + */ + +/*! \cond + */ + +struct null_type; + +/*! \endcond + */ + +/*! This metafunction returns the type of a + * \p tuple's Nth element. + * + * \tparam N This parameter selects the element of interest. + * \tparam T A \c tuple type of interest. + * + * \see pair + * \see tuple + */ +template struct tuple_element; + +/*! This metafunction returns the number of elements + * of a \p tuple type of interest. + * + * \tparam T A \c tuple type of interest. + * + * \see pair + * \see tuple + */ +template struct tuple_size; + + +// get function for non-const cons-lists, returns a reference to the element + +/*! The \p get function returns a reference to a \p tuple element of + * interest. + * + * \param t A reference to a \p tuple of interest. + * \return A reference to \p t's Nth element. + * + * \tparam N The index of the element of interest. + * + * The following code snippet demonstrates how to use \p get to print + * the value of a \p tuple element. + * + * \code + * #include + * #include + * ... + * thrust::tuple t(13, "thrust"); + * + * std::cout << "The 1st value of t is " << thrust::get<0>(t) << std::endl; + * \endcode + * + * \see pair + * \see tuple + */ +template +__host__ __device__ +inline typename access_traits< + typename tuple_element >::type + >::non_const_type +get(detail::cons& t); + + +/*! The \p get function returns a \c const reference to a \p tuple element of + * interest. + * + * \param t A reference to a \p tuple of interest. + * \return A \c const reference to \p t's Nth element. + * + * \tparam N The index of the element of interest. 
+ * + * The following code snippet demonstrates how to use \p get to print + * the value of a \p tuple element. + * + * \code + * #include + * #include + * ... + * thrust::tuple t(13, "thrust"); + * + * std::cout << "The 1st value of t is " << thrust::get<0>(t) << std::endl; + * \endcode + * + * \see pair + * \see tuple + */ +template +__host__ __device__ +inline typename access_traits< + typename tuple_element >::type + >::const_type +get(const detail::cons& t); + + + +/*! \brief \p tuple is a class template that can be instantiated with up to ten + * arguments. Each template argument specifies the type of element in the \p + * tuple. Consequently, tuples are heterogeneous, fixed-size collections of + * values. An instantiation of \p tuple with two arguments is similar to an + * instantiation of \p pair with the same two arguments. Individual elements + * of a \p tuple may be accessed with the \p get function. + * + * \tparam TN The type of the N \c tuple element. Thrust's \p tuple + * type currently supports up to ten elements. + * + * The following code snippet demonstrates how to create a new \p tuple object + * and inspect and modify the value of its elements. + * + * \code + * #include + * #include + * + * int main() { + * // Create a tuple containing an `int`, a `float`, and a string. + * thrust::tuple t(13, 0.1f, "thrust"); + * + * // Individual members are accessed with the free function `get`. + * std::cout << "The first element's value is " << thrust::get<0>(t) << std::endl; + * + * // ... or the member function `get`. + * std::cout << "The second element's value is " << t.get<1>() << std::endl; + * + * // We can also modify elements with the same function. + * thrust::get<0>(t) += 10; + * } + * \endcode + * + * \see pair + * \see get + * \see make_tuple + * \see tuple_element + * \see tuple_size + * \see tie + */ +template + class tuple + /*! \cond + */ + : public detail::map_tuple_to_cons::type + /*! \endcond + */ +{ + /*! 
\cond + */ + + private: + typedef typename detail::map_tuple_to_cons::type inherited; + + /*! \endcond + */ + + public: + + /*! \p tuple's no-argument constructor initializes each element. + */ + inline __host__ __device__ + tuple(void) {} + + /*! \p tuple's one-argument constructor copy constructs the first element from the given parameter + * and intializes all other elements. + * \param t0 The value to assign to this \p tuple's first element. + */ + inline __host__ __device__ + tuple(typename access_traits::parameter_type t0) + : inherited(t0, + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type())) {} + + /*! \p tuple's one-argument constructor copy constructs the first two elements from the given parameters + * and intializes all other elements. + * \param t0 The value to assign to this \p tuple's first element. + * \param t1 The value to assign to this \p tuple's second element. + * \note \p tuple's constructor has ten variants of this form, the rest of which are ommitted here for brevity. + */ + inline __host__ __device__ + tuple(typename access_traits::parameter_type t0, + typename access_traits::parameter_type t1) + : inherited(t0, t1, + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type())) {} + + /*! 
\cond + */ + + inline __host__ __device__ + tuple(typename access_traits::parameter_type t0, + typename access_traits::parameter_type t1, + typename access_traits::parameter_type t2) + : inherited(t0, t1, t2, + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type())) {} + + inline __host__ __device__ + tuple(typename access_traits::parameter_type t0, + typename access_traits::parameter_type t1, + typename access_traits::parameter_type t2, + typename access_traits::parameter_type t3) + : inherited(t0, t1, t2, t3, + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type())) {} + + inline __host__ __device__ + tuple(typename access_traits::parameter_type t0, + typename access_traits::parameter_type t1, + typename access_traits::parameter_type t2, + typename access_traits::parameter_type t3, + typename access_traits::parameter_type t4) + : inherited(t0, t1, t2, t3, t4, + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type())) {} + + inline __host__ __device__ + tuple(typename access_traits::parameter_type t0, + typename access_traits::parameter_type t1, + typename access_traits::parameter_type t2, + typename access_traits::parameter_type t3, + typename access_traits::parameter_type t4, + typename access_traits::parameter_type t5) + : inherited(t0, t1, t2, t3, t4, t5, + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type())) {} + + inline __host__ __device__ + tuple(typename access_traits::parameter_type t0, + typename access_traits::parameter_type t1, + typename access_traits::parameter_type t2, + typename access_traits::parameter_type t3, + typename access_traits::parameter_type t4, + typename 
access_traits::parameter_type t5, + typename access_traits::parameter_type t6) + : inherited(t0, t1, t2, t3, t4, t5, t6, + static_cast(null_type()), + static_cast(null_type()), + static_cast(null_type())) {} + + inline __host__ __device__ + tuple(typename access_traits::parameter_type t0, + typename access_traits::parameter_type t1, + typename access_traits::parameter_type t2, + typename access_traits::parameter_type t3, + typename access_traits::parameter_type t4, + typename access_traits::parameter_type t5, + typename access_traits::parameter_type t6, + typename access_traits::parameter_type t7) + : inherited(t0, t1, t2, t3, t4, t5, t6, t7, + static_cast(null_type()), + static_cast(null_type())) {} + + inline __host__ __device__ + tuple(typename access_traits::parameter_type t0, + typename access_traits::parameter_type t1, + typename access_traits::parameter_type t2, + typename access_traits::parameter_type t3, + typename access_traits::parameter_type t4, + typename access_traits::parameter_type t5, + typename access_traits::parameter_type t6, + typename access_traits::parameter_type t7, + typename access_traits::parameter_type t8) + : inherited(t0, t1, t2, t3, t4, t5, t6, t7, t8, + static_cast(null_type())) {} + + inline __host__ __device__ + tuple(typename access_traits::parameter_type t0, + typename access_traits::parameter_type t1, + typename access_traits::parameter_type t2, + typename access_traits::parameter_type t3, + typename access_traits::parameter_type t4, + typename access_traits::parameter_type t5, + typename access_traits::parameter_type t6, + typename access_traits::parameter_type t7, + typename access_traits::parameter_type t8, + typename access_traits::parameter_type t9) + : inherited(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) {} + + + template + inline __host__ __device__ + tuple(const detail::cons& p) : inherited(p) {} + + __thrust_exec_check_disable__ + template + inline __host__ __device__ + tuple& operator=(const detail::cons& k) + { + 
inherited::operator=(k); + return *this; + } + + /*! \endcond + */ + + /*! This assignment operator allows assigning the first two elements of this \p tuple from a \p pair. + * \param k A \p pair to assign from. + */ + __thrust_exec_check_disable__ + template + __host__ __device__ inline + tuple& operator=(const thrust::pair& k) { + //BOOST_STATIC_ASSERT(length::value == 2);// check_length = 2 + this->head = k.first; + this->tail.head = k.second; + return *this; + } + + /*! \p swap swaps the elements of two tuples. + * + * \param t The other tuple with which to swap. + */ + inline __host__ __device__ + void swap(tuple &t) + { + inherited::swap(t); + } +}; + +/*! \cond + */ + +template <> +class tuple : + public null_type +{ +public: + typedef null_type inherited; +}; + +/*! \endcond + */ + + +/*! This version of \p make_tuple creates a new \c tuple object from a + * single object. + * + * \param t0 The object to copy from. + * \return A \p tuple object with a single member which is a copy of \p t0. + */ +template +__host__ __device__ inline + typename detail::make_tuple_mapper::type + make_tuple(const T0& t0); + +/*! This version of \p make_tuple creates a new \c tuple object from two + * objects. + * + * \param t0 The first object to copy from. + * \param t1 The second object to copy from. + * \return A \p tuple object with two members which are copies of \p t0 + * and \p t1. + * + * \note \p make_tuple has ten variants, the rest of which are omitted here + * for brevity. + */ +template +__host__ __device__ inline + typename detail::make_tuple_mapper::type + make_tuple(const T0& t0, const T1& t1); + +/*! This version of \p tie creates a new \c tuple whose single element is + * a reference which refers to this function's argument. + * + * \param t0 The object to reference. + * \return A \p tuple object with one member which is a reference to \p t0. + */ +template +__host__ __device__ inline +tuple tie(T0& t0); + +/*! 
This version of \p tie creates a new \c tuple of references object which + * refers to this function's arguments. + * + * \param t0 The first object to reference. + * \param t1 The second object to reference. + * \return A \p tuple object with two members which are references to \p t0 + * and \p t1. + * + * \note \p tie has ten variants, the rest of which are omitted here for + * brevity. + */ +template +__host__ __device__ inline +tuple tie(T0& t0, T1& t1); + +/*! \p swap swaps the contents of two tuples. + * + * \param x The first \p tuple to swap. + * \param y The second \p tuple to swap. + */ +template< + typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, + typename U0, typename U1, typename U2, typename U3, typename U4, typename U5, typename U6, typename U7, typename U8, typename U9 +> +inline __host__ __device__ +void swap(tuple &x, + tuple &y); + + + +/*! \cond + */ + +template +__host__ __device__ inline + typename detail::make_tuple_mapper::type + make_tuple(const T0& t0, const T1& t1, const T2& t2); + +template +__host__ __device__ inline + typename detail::make_tuple_mapper::type + make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3); + +template +__host__ __device__ inline + typename detail::make_tuple_mapper::type + make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4); + +template +__host__ __device__ inline + typename detail::make_tuple_mapper::type + make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5); + +template +__host__ __device__ inline + typename detail::make_tuple_mapper::type + make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6); + +template +__host__ __device__ inline + typename detail::make_tuple_mapper::type + make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const 
T7& t7); + +template +__host__ __device__ inline + typename detail::make_tuple_mapper::type + make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const T7& t7, const T8& t8); + +template +__host__ __device__ inline + typename detail::make_tuple_mapper::type + make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const T7& t7, const T8& t8, const T9& t9); + +template +__host__ __device__ inline +tuple tie(T0 &t0, T1 &t1, T2 &t2); + +template +__host__ __device__ inline +tuple tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3); + +template +__host__ __device__ inline +tuple tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4); + +template +__host__ __device__ inline +tuple tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4, T5 &t5); + +template +__host__ __device__ inline +tuple tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4, T5 &t5, T6 &t6); + +template +__host__ __device__ inline +tuple tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4, T5 &t5, T6 &t6, T7 &t7); + +template +__host__ __device__ inline +tuple tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4, T5 &t5, T6 &t6, T7 &t7, T8 &t8); + +template +__host__ __device__ inline +tuple tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4, T5 &t5, T6 &t6, T7 &t7, T8 &t8, T9 &t9); + + +__host__ __device__ inline +bool operator==(const null_type&, const null_type&); + +__host__ __device__ inline +bool operator>=(const null_type&, const null_type&); + +__host__ __device__ inline +bool operator<=(const null_type&, const null_type&); + +__host__ __device__ inline +bool operator!=(const null_type&, const null_type&); + +__host__ __device__ inline +bool operator<(const null_type&, const null_type&); + +__host__ __device__ inline +bool operator>(const null_type&, const null_type&); + +/*! \endcond + */ + +/*! \} // tuple + */ + +/*! 
\} // utility + */ + +THRUST_NAMESPACE_END diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/uninitialized_copy.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/uninitialized_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..94c2763e3e4639fe98fe2a9f6f543f3690ba30f7 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/uninitialized_copy.h @@ -0,0 +1,299 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file uninitialized_copy.h + * \brief Copy construction into a range of uninitialized elements from a source range + */ + +#pragma once + +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup copying + * \{ + */ + + +/*! In \c thrust, the function \c thrust::device_new allocates memory for + * an object and then creates an object at that location by calling a constructor. + * Occasionally, however, it is useful to separate those two operations. + * If each iterator in the range [result, result + (last - first)) points + * to uninitialized memory, then \p uninitialized_copy creates a copy of + * [first, last) in that range. 
That is, for each iterator \c i in + * the input, \p uninitialized_copy creates a copy of \c *i in the location pointed + * to by the corresponding iterator in the output range by \p ForwardIterator's + * \c value_type's copy constructor with *i as its argument. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The first element of the input range to copy from. + * \param last The last element of the input range to copy from. + * \param result The first element of the output range to copy to. + * \return An iterator pointing to the last element of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator. + * \tparam ForwardIterator is a model of Forward Iterator, + * \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that takes + * a single argument whose type is \p InputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p uninitialized_copy to initialize + * a range of uninitialized memory using the \p thrust::device execution policy for + * parallelization: + * + * \code + * #include + * #include + * #include + * #include + * + * struct Int + * { + * __host__ __device__ + * Int(int x) : val(x) {} + * int val; + * }; + * ... 
+ * const int N = 137; + * + * Int val(46); + * thrust::device_vector input(N, val); + * thrust::device_ptr array = thrust::device_malloc(N); + * thrust::uninitialized_copy(thrust::device, input.begin(), input.end(), array); + * + * // Int x = array[i]; + * // x.val == 46 for all 0 <= i < N + * \endcode + * + * \see https://en.cppreference.com/w/cpp/memory/uninitialized_copy + * \see \c copy + * \see \c uninitialized_fill + * \see \c device_new + * \see \c device_malloc + */ +template +__host__ __device__ + ForwardIterator uninitialized_copy(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + ForwardIterator result); + + +/*! In \c thrust, the function \c thrust::device_new allocates memory for + * an object and then creates an object at that location by calling a constructor. + * Occasionally, however, it is useful to separate those two operations. + * If each iterator in the range [result, result + (last - first)) points + * to uninitialized memory, then \p uninitialized_copy creates a copy of + * [first, last) in that range. That is, for each iterator \c i in + * the input, \p uninitialized_copy creates a copy of \c *i in the location pointed + * to by the corresponding iterator in the output range by \p ForwardIterator's + * \c value_type's copy constructor with *i as its argument. + * + * \param first The first element of the input range to copy from. + * \param last The last element of the input range to copy from. + * \param result The first element of the output range to copy to. + * \return An iterator pointing to the last element of the output range. + * + * \tparam InputIterator is a model of Input Iterator. + * \tparam ForwardIterator is a model of Forward Iterator, + * \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that takes + * a single argument whose type is \p InputIterator's \c value_type. 
+ * + * \pre \p first may equal \p result, but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p uninitialized_copy to initialize + * a range of uninitialized memory. + * + * \code + * #include + * #include + * #include + * + * struct Int + * { + * __host__ __device__ + * Int(int x) : val(x) {} + * int val; + * }; + * ... + * const int N = 137; + * + * Int val(46); + * thrust::device_vector input(N, val); + * thrust::device_ptr array = thrust::device_malloc(N); + * thrust::uninitialized_copy(input.begin(), input.end(), array); + * + * // Int x = array[i]; + * // x.val == 46 for all 0 <= i < N + * \endcode + * + * \see https://en.cppreference.com/w/cpp/memory/uninitialized_copy + * \see \c copy + * \see \c uninitialized_fill + * \see \c device_new + * \see \c device_malloc + */ +template + ForwardIterator uninitialized_copy(InputIterator first, + InputIterator last, + ForwardIterator result); + + +/*! In \c thrust, the function \c thrust::device_new allocates memory for + * an object and then creates an object at that location by calling a constructor. + * Occasionally, however, it is useful to separate those two operations. + * If each iterator in the range [result, result + n) points + * to uninitialized memory, then \p uninitialized_copy_n creates a copy of + * [first, first + n) in that range. That is, for each iterator \c i in + * the input, \p uninitialized_copy_n creates a copy of \c *i in the location pointed + * to by the corresponding iterator in the output range by \p InputIterator's + * \c value_type's copy constructor with *i as its argument. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The first element of the input range to copy from. + * \param n The number of elements to copy. 
+ * \param result The first element of the output range to copy to. + * \return An iterator pointing to the last element of the output range. + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator. + * \tparam Size is an integral type. + * \tparam ForwardIterator is a model of Forward Iterator, + * \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that takes + * a single argument whose type is \p InputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the range [first, first + n) and the range [result, result + n) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p uninitialized_copy to initialize + * a range of uninitialized memory using the \p thrust::device execution policy for + * parallelization: + * + * \code + * #include + * #include + * #include + * #include + * + * struct Int + * { + * __host__ __device__ + * Int(int x) : val(x) {} + * int val; + * }; + * ... + * const int N = 137; + * + * Int val(46); + * thrust::device_vector input(N, val); + * thrust::device_ptr array = thrust::device_malloc(N); + * thrust::uninitialized_copy_n(thrust::device, input.begin(), N, array); + * + * // Int x = array[i]; + * // x.val == 46 for all 0 <= i < N + * \endcode + * + * \see https://en.cppreference.com/w/cpp/memory/uninitialized_copy + * \see \c uninitialized_copy + * \see \c copy + * \see \c uninitialized_fill + * \see \c device_new + * \see \c device_malloc + */ +template +__host__ __device__ + ForwardIterator uninitialized_copy_n(const thrust::detail::execution_policy_base &exec, + InputIterator first, + Size n, + ForwardIterator result); + + +/*! In \c thrust, the function \c thrust::device_new allocates memory for + * an object and then creates an object at that location by calling a constructor. + * Occasionally, however, it is useful to separate those two operations. 
+ * If each iterator in the range [result, result + n) points + * to uninitialized memory, then \p uninitialized_copy_n creates a copy of + * [first, first + n) in that range. That is, for each iterator \c i in + * the input, \p uninitialized_copy_n creates a copy of \c *i in the location pointed + * to by the corresponding iterator in the output range by \p InputIterator's + * \c value_type's copy constructor with *i as its argument. + * + * \param first The first element of the input range to copy from. + * \param n The number of elements to copy. + * \param result The first element of the output range to copy to. + * \return An iterator pointing to the last element of the output range. + * + * \tparam InputIterator is a model of Input Iterator. + * \tparam Size is an integral type. + * \tparam ForwardIterator is a model of Forward Iterator, + * \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that takes + * a single argument whose type is \p InputIterator's \c value_type. + * + * \pre \p first may equal \p result, but the range [first, first + n) and the range [result, result + n) shall not overlap otherwise. + * + * The following code snippet demonstrates how to use \p uninitialized_copy to initialize + * a range of uninitialized memory. + * + * \code + * #include + * #include + * #include + * + * struct Int + * { + * __host__ __device__ + * Int(int x) : val(x) {} + * int val; + * }; + * ... 
+ * const int N = 137; + * + * Int val(46); + * thrust::device_vector input(N, val); + * thrust::device_ptr array = thrust::device_malloc(N); + * thrust::uninitialized_copy_n(input.begin(), N, array); + * + * // Int x = array[i]; + * // x.val == 46 for all 0 <= i < N + * \endcode + * + * \see https://en.cppreference.com/w/cpp/memory/uninitialized_copy + * \see \c uninitialized_copy + * \see \c copy + * \see \c uninitialized_fill + * \see \c device_new + * \see \c device_malloc + */ +template + ForwardIterator uninitialized_copy_n(InputIterator first, + Size n, + ForwardIterator result); + + +/*! \} // copying + */ + +THRUST_NAMESPACE_END + +#include diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/unique.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/unique.h new file mode 100644 index 0000000000000000000000000000000000000000..234cd4935693ff002f597b76cdf0ea497105d075 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/unique.h @@ -0,0 +1,1140 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file unique.h + * \brief Move unique elements to the front of a range + */ + +#pragma once + +#include +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! 
\addtogroup stream_compaction + * \{ + */ + + +/*! For each group of consecutive elements in the range [first, last) + * with the same value, \p unique removes all but the first element of + * the group. The return value is an iterator \c new_last such that + * no two consecutive elements in the range [first, new_last) are + * equal. The iterators in the range [new_last, last) are all still + * dereferenceable, but the elements that they point to are unspecified. + * \p unique is stable, meaning that the relative order of elements that are + * not removed is unchanged. + * + * This version of \p unique uses \c operator== to test for equality. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input range. + * \param last The end of the input range. + * \return The end of the unique range [first, new_last). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable, + * and \p ForwardIterator's \c value_type is a model of Equality Comparable. + * + * The following code snippet demonstrates how to use \p unique to + * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution policy + * for parallelization: + * + * \code + * #include + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int *new_end = thrust::unique(thrust::host, A, A + N); + * // The first four values of A are now {1, 3, 2, 1} + * // Values beyond new_end are unspecified. + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/unique + * \see unique_copy + */ +template +__host__ __device__ +ForwardIterator unique(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last); + + +/*! 
For each group of consecutive elements in the range [first, last) + * with the same value, \p unique removes all but the first element of + * the group. The return value is an iterator \c new_last such that + * no two consecutive elements in the range [first, new_last) are + * equal. The iterators in the range [new_last, last) are all still + * dereferenceable, but the elements that they point to are unspecified. + * \p unique is stable, meaning that the relative order of elements that are + * not removed is unchanged. + * + * This version of \p unique uses \c operator== to test for equality. + * + * \param first The beginning of the input range. + * \param last The end of the input range. + * \return The end of the unique range [first, new_last). + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable, + * and \p ForwardIterator's \c value_type is a model of Equality Comparable. + * + * The following code snippet demonstrates how to use \p unique to + * compact a sequence of numbers to remove consecutive duplicates. + * + * \code + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int *new_end = thrust::unique(A, A + N); + * // The first four values of A are now {1, 3, 2, 1} + * // Values beyond new_end are unspecified. + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/unique + * \see unique_copy + */ +template +ForwardIterator unique(ForwardIterator first, + ForwardIterator last); + + +/*! For each group of consecutive elements in the range [first, last) + * with the same value, \p unique removes all but the first element of + * the group. The return value is an iterator \c new_last such that + * no two consecutive elements in the range [first, new_last) are + * equal. The iterators in the range [new_last, last) are all still + * dereferenceable, but the elements that they point to are unspecified. 
+ * \p unique is stable, meaning that the relative order of elements that are + * not removed is unchanged. + * + * This version of \p unique uses the function object \p binary_pred to test + * for equality. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param binary_pred The binary predicate used to determine equality. + * \return The end of the unique range [first, new_last) + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable, + * and \p ForwardIterator's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type and to \p BinaryPredicate's \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p unique to + * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution policy + * for parallelization: + * + * \code + * #include + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int *new_end = thrust::unique(thrust::host, A, A + N, thrust::equal_to()); + * // The first four values of A are now {1, 3, 2, 1} + * // Values beyond new_end are unspecified. + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/unique + * \see unique_copy + */ +template +__host__ __device__ +ForwardIterator unique(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + BinaryPredicate binary_pred); + + +/*! For each group of consecutive elements in the range [first, last) + * with the same value, \p unique removes all but the first element of + * the group. 
The return value is an iterator \c new_last such that + * no two consecutive elements in the range [first, new_last) are + * equal. The iterators in the range [new_last, last) are all still + * dereferenceable, but the elements that they point to are unspecified. + * \p unique is stable, meaning that the relative order of elements that are + * not removed is unchanged. + * + * This version of \p unique uses the function object \p binary_pred to test + * for equality. + * + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param binary_pred The binary predicate used to determine equality. + * \return The end of the unique range [first, new_last) + * + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator is mutable, + * and \p ForwardIterator's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type and to \p BinaryPredicate's \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p unique to + * compact a sequence of numbers to remove consecutive duplicates. + * + * \code + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int *new_end = thrust::unique(A, A + N, thrust::equal_to()); + * // The first four values of A are now {1, 3, 2, 1} + * // Values beyond new_end are unspecified. + * \endcode + * + * \see https://en.cppreference.com/w/cpp/algorithm/unique + * \see unique_copy + */ +template +ForwardIterator unique(ForwardIterator first, + ForwardIterator last, + BinaryPredicate binary_pred); + + +/*! \p unique_copy copies elements from the range [first, last) + * to a range beginning with \p result, except that in a consecutive group + * of duplicate elements only the first one is copied. The return value + * is the end of the range to which the elements are copied. 
+ * + * The reason there are two different versions of unique_copy is that there + * are two different definitions of what it means for a consecutive group of + * elements to be duplicates. In the first version, the test is simple + * equality: the elements in a range [f, l) are duplicates if, + * for every iterator \p i in the range, either i == f or else + * *i == *(i-1). In the second, the test is an arbitrary + * \p BinaryPredicate \p binary_pred: the elements in [f, l) are + * duplicates if, for every iterator \p i in the range, either i == f + * or else binary_pred(*i, *(i-1)) is \p true. + * + * This version of \p unique_copy uses \c operator== to test for equality. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param result The beginning of the output range. + * \return The end of the unique range [result, result_end). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is a model of Equality Comparable. + * \tparam OutputIterator is a model of Output Iterator and + * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type. + * + * \pre The range [first,last) and the range [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p unique_copy to + * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution + * policy for parallelization: + * + * \code + * #include + * #include + * ... 
+ * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int B[N]; + * int *result_end = thrust::unique_copy(thrust::host, A, A + N, B); + * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4 + * // Values beyond result_end are unspecified + * \endcode + * + * \see unique + * \see https://en.cppreference.com/w/cpp/algorithm/unique_copy + */ +template +__host__ __device__ +OutputIterator unique_copy(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator result); + + +/*! \p unique_copy copies elements from the range [first, last) + * to a range beginning with \p result, except that in a consecutive group + * of duplicate elements only the first one is copied. The return value + * is the end of the range to which the elements are copied. + * + * The reason there are two different versions of unique_copy is that there + * are two different definitions of what it means for a consecutive group of + * elements to be duplicates. In the first version, the test is simple + * equality: the elements in a range [f, l) are duplicates if, + * for every iterator \p i in the range, either i == f or else + * *i == *(i-1). In the second, the test is an arbitrary + * \p BinaryPredicate \p binary_pred: the elements in [f, l) are + * duplicates if, for every iterator \p i in the range, either i == f + * or else binary_pred(*i, *(i-1)) is \p true. + * + * This version of \p unique_copy uses \c operator== to test for equality. + * + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param result The beginning of the output range. + * \return The end of the unique range [result, result_end). + * + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is a model of Equality Comparable. 
+ * \tparam OutputIterator is a model of Output Iterator and + * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type. + * + * \pre The range [first,last) and the range [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p unique_copy to + * compact a sequence of numbers to remove consecutive duplicates. + * + * \code + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int B[N]; + * int *result_end = thrust::unique_copy(A, A + N, B); + * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4 + * // Values beyond result_end are unspecified + * \endcode + * + * \see unique + * \see https://en.cppreference.com/w/cpp/algorithm/unique_copy + */ +template +OutputIterator unique_copy(InputIterator first, + InputIterator last, + OutputIterator result); + + +/*! \p unique_copy copies elements from the range [first, last) + * to a range beginning with \p result, except that in a consecutive group + * of duplicate elements only the first one is copied. The return value + * is the end of the range to which the elements are copied. + * + * This version of \p unique_copy uses the function object \c binary_pred + * to test for equality. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param result The beginning of the output range. + * \param binary_pred The binary predicate used to determine equality. + * \return The end of the unique range [result, result_end). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is a model of Equality Comparable. 
+ * \tparam OutputIterator is a model of Output Iterator and + * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * \pre The range [first,last) and the range [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p unique_copy to + * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution + * policy for parallelization: + * + * \code + * #include + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int B[N]; + * int *result_end = thrust::unique_copy(thrust::host, A, A + N, B, thrust::equal_to()); + * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4 + * // Values beyond result_end are unspecified. + * \endcode + * + * \see unique + * \see https://en.cppreference.com/w/cpp/algorithm/unique_copy + */ +template +__host__ __device__ +OutputIterator unique_copy(const thrust::detail::execution_policy_base &exec, + InputIterator first, + InputIterator last, + OutputIterator result, + BinaryPredicate binary_pred); + + +/*! \p unique_copy copies elements from the range [first, last) + * to a range beginning with \p result, except that in a consecutive group + * of duplicate elements only the first one is copied. The return value + * is the end of the range to which the elements are copied. + * + * This version of \p unique_copy uses the function object \c binary_pred + * to test for equality. + * + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param result The beginning of the output range. + * \param binary_pred The binary predicate used to determine equality. + * \return The end of the unique range [result, result_end). + * + * \tparam InputIterator is a model of Input Iterator, + * and \p InputIterator's \c value_type is a model of Equality Comparable. 
+ * \tparam OutputIterator is a model of Output Iterator and + * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * \pre The range [first,last) and the range [result, result + (last - first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p unique_copy to + * compact a sequence of numbers to remove consecutive duplicates. + * + * \code + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int B[N]; + * int *result_end = thrust::unique_copy(A, A + N, B, thrust::equal_to()); + * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4 + * // Values beyond result_end are unspecified. + * \endcode + * + * \see unique + * \see https://en.cppreference.com/w/cpp/algorithm/unique_copy + */ +template +OutputIterator unique_copy(InputIterator first, + InputIterator last, + OutputIterator result, + BinaryPredicate binary_pred); + + +/*! \p unique_by_key is a generalization of \p unique to key-value pairs. + * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p unique_by_key removes all but the first element of + * the group. Similarly, the corresponding values in the range + * [values_first, values_first + (keys_last - keys_first)) + * are also removed. + * + * The return value is a \p pair of iterators (new_keys_last,new_values_last) + * such that no two consecutive elements in the range [keys_first, new_keys_last) + * are equal. + * + * This version of \p unique_by_key uses \c operator== to test for equality and + * \c project1st to reduce values with equal keys. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first The beginning of the key range. + * \param keys_last The end of the key range. 
+ * \param values_first The beginning of the value range. + * \return A pair of iterators at end of the ranges [key_first, keys_new_last) and [values_first, values_new_last). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator1 is a model of Forward Iterator, + * and \p ForwardIterator1 is mutable, + * and \p ForwardIterator's \c value_type is a model of Equality Comparable. + * \tparam ForwardIterator2 is a model of Forward Iterator, + * and \p ForwardIterator2 is mutable. + * + * \pre The range [keys_first, keys_last) and the range [values_first, values_first + (keys_last - keys_first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p unique_by_key to + * compact a sequence of key/value pairs to remove consecutive duplicates using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values + * + * thrust::pair new_end; + * new_end = thrust::unique_by_key(thrust::host, A, A + N, B); + * + * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4. + * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4. + * \endcode + * + * \see unique + * \see unique_by_key_copy + * \see reduce_by_key + */ +template +__host__ __device__ + thrust::pair + unique_by_key(const thrust::detail::execution_policy_base &exec, + ForwardIterator1 keys_first, + ForwardIterator1 keys_last, + ForwardIterator2 values_first); + + +/*! \p unique_by_key is a generalization of \p unique to key-value pairs. + * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p unique_by_key removes all but the first element of + * the group. Similarly, the corresponding values in the range + * [values_first, values_first + (keys_last - keys_first)) + * are also removed. 
+ * + * The return value is a \p pair of iterators (new_keys_last,new_values_last) + * such that no two consecutive elements in the range [keys_first, new_keys_last) + * are equal. + * + * This version of \p unique_by_key uses \c operator== to test for equality and + * \c project1st to reduce values with equal keys. + * + * \param keys_first The beginning of the key range. + * \param keys_last The end of the key range. + * \param values_first The beginning of the value range. + * \return A pair of iterators at end of the ranges [key_first, keys_new_last) and [values_first, values_new_last). + * + * \tparam ForwardIterator1 is a model of Forward Iterator, + * and \p ForwardIterator1 is mutable, + * and \p ForwardIterator's \c value_type is a model of Equality Comparable. + * \tparam ForwardIterator2 is a model of Forward Iterator, + * and \p ForwardIterator2 is mutable. + * + * \pre The range [keys_first, keys_last) and the range [values_first, values_first + (keys_last - keys_first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p unique_by_key to + * compact a sequence of key/value pairs to remove consecutive duplicates. + * + * \code + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values + * + * thrust::pair new_end; + * new_end = thrust::unique_by_key(A, A + N, B); + * + * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4. + * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4. + * \endcode + * + * \see unique + * \see unique_by_key_copy + * \see reduce_by_key + */ +template + thrust::pair + unique_by_key(ForwardIterator1 keys_first, + ForwardIterator1 keys_last, + ForwardIterator2 values_first); + + +/*! \p unique_by_key is a generalization of \p unique to key-value pairs. 
+ * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p unique_by_key removes all but the first element of + * the group. Similarly, the corresponding values in the range + * [values_first, values_first + (keys_last - keys_first)) + * are also removed. + * + * This version of \p unique_by_key uses the function object \c binary_pred + * to test for equality and \c project1st to reduce values with equal keys. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first The beginning of the key range. + * \param keys_last The end of the key range. + * \param values_first The beginning of the value range. + * \param binary_pred The binary predicate used to determine equality. + * \return A pair of iterators at end of the ranges [keys_first, keys_new_last) and [values_first, values_new_last). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator1 is a model of Forward Iterator, + * and \p ForwardIterator1 is mutable, + * and \p ForwardIterator1's \c value_type is a model of Equality Comparable. + * \tparam ForwardIterator2 is a model of Forward Iterator, + * and \p ForwardIterator2 is mutable. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * \pre The range [keys_first, keys_last) and the range [values_first, values_first + (keys_last - keys_first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p unique_by_key to + * compact a sequence of key/value pairs to remove consecutive duplicates using the \p thrust::host + * execution policy for parallelization: + * + * \code + * #include + * #include + * ... 
+ * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values + * + * thrust::pair new_end; + * thrust::equal_to binary_pred; + * new_end = thrust::unique_by_key(thrust::host, A, A + N, B, binary_pred); + * + * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4. + * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4. + * \endcode + * + * \see unique + * \see unique_by_key_copy + * \see reduce_by_key + */ +template +__host__ __device__ + thrust::pair + unique_by_key(const thrust::detail::execution_policy_base &exec, + ForwardIterator1 keys_first, + ForwardIterator1 keys_last, + ForwardIterator2 values_first, + BinaryPredicate binary_pred); + + +/*! \p unique_by_key is a generalization of \p unique to key-value pairs. + * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p unique_by_key removes all but the first element of + * the group. Similarly, the corresponding values in the range + * [values_first, values_first + (keys_last - keys_first)) + * are also removed. + * + * This version of \p unique_by_key uses the function object \c binary_pred + * to test for equality and \c project1st to reduce values with equal keys. + * + * \param keys_first The beginning of the key range. + * \param keys_last The end of the key range. + * \param values_first The beginning of the value range. + * \param binary_pred The binary predicate used to determine equality. + * \return A pair of iterators at end of the ranges [keys_first, keys_new_last) and [values_first, values_new_last). + * + * \tparam ForwardIterator1 is a model of Forward Iterator, + * and \p ForwardIterator1 is mutable, + * and \p ForwardIterator1's \c value_type is a model of Equality Comparable. + * \tparam ForwardIterator2 is a model of Forward Iterator, + * and \p ForwardIterator2 is mutable. + * \tparam BinaryPredicate is a model of Binary Predicate. 
+ * + * \pre The range [keys_first, keys_last) and the range [values_first, values_first + (keys_last - keys_first)) shall not overlap. + * + * The following code snippet demonstrates how to use \p unique_by_key to + * compact a sequence of key/value pairs to remove consecutive duplicates. + * + * \code + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values + * + * thrust::pair new_end; + * thrust::equal_to binary_pred; + * new_end = thrust::unique_by_key(A, A + N, B, binary_pred); + * + * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4. + * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4. + * \endcode + * + * \see unique + * \see unique_by_key_copy + * \see reduce_by_key + */ +template + thrust::pair + unique_by_key(ForwardIterator1 keys_first, + ForwardIterator1 keys_last, + ForwardIterator2 values_first, + BinaryPredicate binary_pred); + + +/*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs. + * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p unique_by_key_copy copies the first element of the group to + * a range beginning with \c keys_result and the corresponding values from the range + * [values_first, values_first + (keys_last - keys_first)) are copied to a range + * beginning with \c values_result. + * + * This version of \p unique_by_key_copy uses \c operator== to test for equality and + * \c project1st to reduce values with equal keys. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first The beginning of the input key range. + * \param keys_last The end of the input key range. + * \param values_first The beginning of the input value range. + * \param keys_result The beginning of the output key range. 
+ * \param values_result The beginning of the output value range. + * \return A pair of iterators at end of the ranges [keys_result, keys_result_last) and [values_result, values_result_last). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputIterator1 is a model of Output Iterator and + * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. + * \tparam OutputIterator2 is a model of Output Iterator and + * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. + * + * \pre The input ranges shall not overlap either output range. + * + * The following code snippet demonstrates how to use \p unique_by_key_copy to + * compact a sequence of key/value pairs and with equal keys using the \p thrust::host execution policy + * for parallelization: + * + * \code + * #include + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values + * int C[N]; // output keys + * int D[N]; // output values + * + * thrust::pair new_end; + * new_end = thrust::unique_by_key_copy(thrust::host, A, A + N, B, C, D); + * + * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. + * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4. + * \endcode + * + * \see unique_copy + * \see unique_by_key + * \see reduce_by_key + */ +template +__host__ __device__ + thrust::pair + unique_by_key_copy(const thrust::detail::execution_policy_base &exec, + InputIterator1 keys_first, + InputIterator1 keys_last, + InputIterator2 values_first, + OutputIterator1 keys_result, + OutputIterator2 values_result); + + +/*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs. 
+ * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p unique_by_key_copy copies the first element of the group to + * a range beginning with \c keys_result and the corresponding values from the range + * [values_first, values_first + (keys_last - keys_first)) are copied to a range + * beginning with \c values_result. + * + * This version of \p unique_by_key_copy uses \c operator== to test for equality and + * \c project1st to reduce values with equal keys. + * + * \param keys_first The beginning of the input key range. + * \param keys_last The end of the input key range. + * \param values_first The beginning of the input value range. + * \param keys_result The beginning of the output key range. + * \param values_result The beginning of the output value range. + * \return A pair of iterators at end of the ranges [keys_result, keys_result_last) and [values_result, values_result_last). + * + * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputIterator1 is a model of Output Iterator and + * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. + * \tparam OutputIterator2 is a model of Output Iterator and + * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. + * + * \pre The input ranges shall not overlap either output range. + * + * The following code snippet demonstrates how to use \p unique_by_key_copy to + * compact a sequence of key/value pairs and with equal keys. + * + * \code + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values + * int C[N]; // output keys + * int D[N]; // output values + * + * thrust::pair new_end; + * new_end = thrust::unique_by_key_copy(A, A + N, B, C, D); + * + * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. 
+ * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4. + * \endcode + * + * \see unique_copy + * \see unique_by_key + * \see reduce_by_key + */ +template + thrust::pair + unique_by_key_copy(InputIterator1 keys_first, + InputIterator1 keys_last, + InputIterator2 values_first, + OutputIterator1 keys_result, + OutputIterator2 values_result); + + +/*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs. + * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p unique_by_key_copy copies the first element of the group to + * a range beginning with \c keys_result and the corresponding values from the range + * [values_first, values_first + (keys_last - keys_first)) are copied to a range + * beginning with \c values_result. + * + * This version of \p unique_by_key_copy uses the function object \c binary_pred + * to test for equality and \c project1st to reduce values with equal keys. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param keys_first The beginning of the input key range. + * \param keys_last The end of the input key range. + * \param values_first The beginning of the input value range. + * \param keys_result The beginning of the output key range. + * \param values_result The beginning of the output value range. + * \param binary_pred The binary predicate used to determine equality. + * \return A pair of iterators at end of the ranges [keys_result, keys_result_last) and [values_result, values_result_last). + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputIterator1 is a model of Output Iterator and + * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. 
+ * \tparam OutputIterator2 is a model of Output Iterator and + * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * \pre The input ranges shall not overlap either output range. + * + * The following code snippet demonstrates how to use \p unique_by_key_copy to + * compact a sequence of key/value pairs and with equal keys using the \p thrust::host execution policy for + * parallelization: + * + * \code + * #include + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values + * int C[N]; // output keys + * int D[N]; // output values + * + * thrust::pair new_end; + * thrust::equal_to binary_pred; + * new_end = thrust::unique_by_key_copy(thrust::host, A, A + N, B, C, D, binary_pred); + * + * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. + * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4. + * \endcode + * + * \see unique_copy + * \see unique_by_key + * \see reduce_by_key + */ +template +__host__ __device__ + thrust::pair + unique_by_key_copy(const thrust::detail::execution_policy_base &exec, + InputIterator1 keys_first, + InputIterator1 keys_last, + InputIterator2 values_first, + OutputIterator1 keys_result, + OutputIterator2 values_result, + BinaryPredicate binary_pred); + + +/*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs. + * For each group of consecutive keys in the range [keys_first, keys_last) + * that are equal, \p unique_by_key_copy copies the first element of the group to + * a range beginning with \c keys_result and the corresponding values from the range + * [values_first, values_first + (keys_last - keys_first)) are copied to a range + * beginning with \c values_result. 
+ * + * This version of \p unique_by_key_copy uses the function object \c binary_pred + * to test for equality and \c project1st to reduce values with equal keys. + * + * \param keys_first The beginning of the input key range. + * \param keys_last The end of the input key range. + * \param values_first The beginning of the input value range. + * \param keys_result The beginning of the output key range. + * \param values_result The beginning of the output value range. + * \param binary_pred The binary predicate used to determine equality. + * \return A pair of iterators at end of the ranges [keys_result, keys_result_last) and [values_result, values_result_last). + * + * \tparam InputIterator1 is a model of Input Iterator, + * \tparam InputIterator2 is a model of Input Iterator, + * \tparam OutputIterator1 is a model of Output Iterator and + * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type. + * \tparam OutputIterator2 is a model of Output Iterator and + * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * \pre The input ranges shall not overlap either output range. + * + * The following code snippet demonstrates how to use \p unique_by_key_copy to + * compact a sequence of key/value pairs and with equal keys. + * + * \code + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys + * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values + * int C[N]; // output keys + * int D[N]; // output values + * + * thrust::pair new_end; + * thrust::equal_to binary_pred; + * new_end = thrust::unique_by_key_copy(A, A + N, B, C, D, binary_pred); + * + * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4. + * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4. 
+ * \endcode + * + * \see unique_copy + * \see unique_by_key + * \see reduce_by_key + */ +template + thrust::pair + unique_by_key_copy(InputIterator1 keys_first, + InputIterator1 keys_last, + InputIterator2 values_first, + OutputIterator1 keys_result, + OutputIterator2 values_result, + BinaryPredicate binary_pred); + + +/*! \p unique_count counts runs of equal elements in the range [first, last) + * with the same value, + * + * This version of \p unique_count uses the function object \p binary_pred to test for equality. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param binary_pred The binary predicate used to determine equality. + * \return The number of runs of equal elements in [first, new_last) + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type and to \p BinaryPredicate's \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p unique_count to + * determine a number of runs of equal elements using the \p thrust::host execution policy + * for parallelization: + * + * \code + * #include + * #include + * ... 
+ * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int count = thrust::unique_count(thrust::host, A, A + N, thrust::equal_to()); + * // count is now 4 + * \endcode + * + * \see unique_copy + * \see unique_by_key_copy + * \see reduce_by_key_copy + */ +template +__host__ __device__ + typename thrust::iterator_traits::difference_type + unique_count(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last, + BinaryPredicate binary_pred); + + +/*! \p unique_count counts runs of equal elements in the range [first, last) + * with the same value, + * + * This version of \p unique_count uses \c operator== to test for equality. + * + * The algorithm's execution is parallelized as determined by \p exec. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param binary_pred The binary predicate used to determine equality. + * \return The number of runs of equal elements in [first, new_last) + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type and to \p BinaryPredicate's \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p unique_count to + * determine the number of runs of equal elements using the \p thrust::host execution policy + * for parallelization: + * + * \code + * #include + * #include + * ... 
+ * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int count = thrust::unique_count(thrust::host, A, A + N); + * // count is now 4 + * \endcode + * + * \see unique_copy + * \see unique_by_key_copy + * \see reduce_by_key_copy + */ +template +__host__ __device__ + typename thrust::iterator_traits::difference_type + unique_count(const thrust::detail::execution_policy_base &exec, + ForwardIterator first, + ForwardIterator last); + + +/*! \p unique_count counts runs of equal elements in the range [first, last) + * with the same value, + * + * This version of \p unique_count uses the function object \p binary_pred to test for equality. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param binary_pred The binary predicate used to determine equality. + * \return The number of runs of equal elements in [first, new_last) + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type and to \p BinaryPredicate's \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p unique_count to + * determine the number of runs of equal elements: + * + * \code + * #include + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int count = thrust::unique_count(A, A + N, thrust::equal_to()); + * // count is now 4 + * \endcode + * + * \see unique_copy + * \see unique_by_key_copy + * \see reduce_by_key_copy + */ +template +__host__ __device__ + typename thrust::iterator_traits::difference_type + unique_count(ForwardIterator first, + ForwardIterator last, + BinaryPredicate binary_pred); + + +/*! 
\p unique_count counts runs of equal elements in the range [first, last) + * with the same value, + * + * This version of \p unique_count uses \c operator== to test for equality. + * + * \param exec The execution policy to use for parallelization. + * \param first The beginning of the input range. + * \param last The end of the input range. + * \param binary_pred The binary predicate used to determine equality. + * \return The number of runs of equal elements in [first, new_last) + * + * \tparam DerivedPolicy The name of the derived execution policy. + * \tparam ForwardIterator is a model of Forward Iterator, + * and \p ForwardIterator's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type and to \p BinaryPredicate's \c second_argument_type. + * \tparam BinaryPredicate is a model of Binary Predicate. + * + * The following code snippet demonstrates how to use \p unique_count to + * determine the number of runs of equal elements: + * + * \code + * #include + * #include + * ... + * const int N = 7; + * int A[N] = {1, 3, 3, 3, 2, 2, 1}; + * int count = thrust::unique_count(thrust::host, A, A + N); + * // count is now 4 + * \endcode + * + * \see unique_copy + * \see unique_by_key_copy + * \see reduce_by_key_copy + */ +template +__host__ __device__ + typename thrust::iterator_traits::difference_type + unique_count(ForwardIterator first, + ForwardIterator last); + + +/*! 
\} // end stream_compaction + */ + +THRUST_NAMESPACE_END + +#include + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/universal_allocator.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/universal_allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..8d85cd20de0d4d0a198019565a5ffbe2df901070 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/universal_allocator.h @@ -0,0 +1,77 @@ +/* + * Copyright 2008-2020 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/*! \file universal_allocator.h + * \brief An allocator which creates new elements in memory accessible to both + * hosts and devices. + */ + +#pragma once + +#include + +// #include the device system's vector header +#define __THRUST_DEVICE_SYSTEM_MEMORY_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/memory.h> +#include __THRUST_DEVICE_SYSTEM_MEMORY_HEADER +#undef __THRUST_DEVICE_SYSTEM_MEMORY_HEADER + +THRUST_NAMESPACE_BEGIN + +/** \addtogroup memory_resources Memory Resources + * \ingroup memory_management_classes + * \{ + */ + +/*! \brief An allocator which creates new elements in memory accessible by + * both hosts and devices. 
+ * + * \see https://en.cppreference.com/w/cpp/named_req/Allocator + */ +using thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::universal_allocator; + +/*! \p universal_ptr stores a pointer to an object allocated in memory accessible + * to both hosts and devices. + * + * Algorithms dispatched with this type of pointer will be dispatched to + * either host or device, depending on which backend you are using. Explicit + * policies (\p thrust::device, etc) can be used to specify where an algorithm + * should be run. + * + * \p universal_ptr has pointer semantics: it may be dereferenced safely from + * both hosts and devices and may be manipulated with pointer arithmetic. + * + * \p universal_ptr can be created with \p universal_allocator or by explicitly + * calling its constructor with a raw pointer. + * + * The raw pointer encapsulated by a \p universal_ptr may be obtained by + * either its get method or the \p raw_pointer_cast free function. + * + * \note \p universal_ptr is not a smart pointer; it is the programmer's + * responsibility to deallocate memory pointed to by \p universal_ptr. + * + * \see host_ptr For the documentation of the complete interface which is + * shared by \p universal_ptr. + * \see raw_pointer_cast + */ +template +using universal_ptr = + thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::universal_pointer; + +/*! 
\} + */ + +THRUST_NAMESPACE_END diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/universal_vector.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/universal_vector.h new file mode 100644 index 0000000000000000000000000000000000000000..0ce38fd868c208e0d1bdc6ca1539ad16aeb26c00 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/universal_vector.h @@ -0,0 +1,55 @@ +/* + * Copyright 2008-2020 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*! \file + * \brief A dynamically-sizable array of elements which resides in memory + * accessible to both hosts and devices. + */ + +#pragma once + +#include +#include + +// #include the device system's vector header +#define __THRUST_DEVICE_SYSTEM_VECTOR_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/vector.h> +#include __THRUST_DEVICE_SYSTEM_VECTOR_HEADER +#undef __THRUST_DEVICE_SYSTEM_VECTOR_HEADER + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup containers Containers + * \{ + */ + +/*! A \p universal_vector is a container that supports random access to elements, + * constant time removal of elements at the end, and linear time insertion + * and removal of elements at the beginning or in the middle. 
The number of + * elements in a \p universal_vector may vary dynamically; memory management is + * automatic. The memory associated with a \p universal_vector resides in memory + * accessible to hosts and devices. + * + * \see https://en.cppreference.com/w/cpp/container/vector + * \see host_vector For the documentation of the complete interface which is + * shared by \p universal_vector. + * \see device_vector + */ +using thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::universal_vector; + +/*! \} // containers + */ + +THRUST_NAMESPACE_END diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/zip_function.h b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/zip_function.h new file mode 100644 index 0000000000000000000000000000000000000000..7653f9b7f3e1fa7e3f2c44cdbcdf65f9e0683af9 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/zip_function.h @@ -0,0 +1,212 @@ + +/*! \file thrust/zip_function.h + * \brief Adaptor type that turns an N-ary function object into one that takes + * a tuple of size N so it can easily be used with algorithms taking zip + * iterators + */ + +#pragma once + +#include +#include +#include + +#if THRUST_CPP_DIALECT >= 2011 && !defined(THRUST_LEGACY_GCC) + +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +/*! \addtogroup function_objects Function Objects + * \{ + */ + +/*! 
\addtogroup function_object_adaptors Function Object Adaptors + * \ingroup function_objects + * \{ + */ + +namespace detail { +namespace zip_detail { + +// Add workaround for decltype(auto) on C++11-only compilers: +#if THRUST_CPP_DIALECT >= 2014 + +__thrust_exec_check_disable__ +template +__host__ __device__ +decltype(auto) apply_impl(Function&& func, Tuple&& args, index_sequence) +{ + return func(thrust::get(THRUST_FWD(args))...); +} + +template +__host__ __device__ +decltype(auto) apply(Function&& func, Tuple&& args) +{ + constexpr auto tuple_size = thrust::tuple_size::type>::value; + return apply_impl(THRUST_FWD(func), THRUST_FWD(args), make_index_sequence{}); +} + +#else // THRUST_CPP_DIALECT + +__thrust_exec_check_disable__ +template +__host__ __device__ +auto apply_impl(Function&& func, Tuple&& args, index_sequence) +THRUST_DECLTYPE_RETURNS(func(thrust::get(THRUST_FWD(args))...)) + +template +__host__ __device__ +auto apply(Function&& func, Tuple&& args) +THRUST_DECLTYPE_RETURNS( + apply_impl( + THRUST_FWD(func), + THRUST_FWD(args), + make_index_sequence< + thrust::tuple_size::type>::value>{}) +) + +#endif // THRUST_CPP_DIALECT + +} // namespace zip_detail +} // namespace detail + +/*! \p zip_function is a function object that allows the easy use of N-ary + * function objects with \p zip_iterators without redefining them to take a + * \p tuple instead of N arguments. + * + * This means that if a functor that takes 2 arguments which could be used with + * the \p transform function and \p device_iterators can be extended to take 3 + * arguments and \p zip_iterators without rewriting the functor in terms of + * \p tuple. + * + * The \p make_zip_function convenience function is provided to avoid having + * to explicitely define the type of the functor when creating a \p zip_function, + * whic is especially helpful when using lambdas as the functor. 
+ * + * \code + * #include + * #include + * #include + * #include + * + * struct SumTuple { + * float operator()(Tuple tup) { + * return std::get<0>(tup) + std::get<1>(tup) + std::get<2>(tup); + * } + * }; + * struct SumArgs { + * float operator()(float a, float b, float c) { + * return a + b + c; + * } + * }; + * + * int main() { + * thrust::device_vector A(3); + * thrust::device_vector B(3); + * thrust::device_vector C(3); + * thrust::device_vector D(3); + * A[0] = 0.f; A[1] = 1.f; A[2] = 2.f; + * B[0] = 1.f; B[1] = 2.f; B[2] = 3.f; + * C[0] = 2.f; C[1] = 3.f; C[2] = 4.f; + * + * // The following four invocations of transform are equivalent + * // Transform with 3-tuple + * thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(A.begin(), B.begin(), C.begin())), + * thrust::make_zip_iterator(thrust::make_tuple(A.end(), B.end(), C.end())), + * D.begin(), + * SumTuple{}); + * + * // Transform with 3 parameters + * thrust::zip_function adapted{}; + * thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(A.begin(), B.begin(), C.begin())), + * thrust::make_zip_iterator(thrust::make_tuple(A.end(), B.end(), C.end())), + * D.begin(), + * adapted); + * + * // Transform with 3 parameters with convenience function + * thrust::zip_function adapted{}; + * thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(A.begin(), B.begin(), C.begin())), + * thrust::make_zip_iterator(thrust::make_tuple(A.end(), B.end(), C.end())), + * D.begin(), + * thrust::make_zip_function(SumArgs{})); + * + * // Transform with 3 parameters with convenience function and lambda + * thrust::zip_function adapted{}; + * thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(A.begin(), B.begin(), C.begin())), + * thrust::make_zip_iterator(thrust::make_tuple(A.end(), B.end(), C.end())), + * D.begin(), + * thrust::make_zip_function([] (float a, float b, float c) { + * return a + b + c; + * })); + * return 0; + * } + * \endcode + * + * \see make_zip_function + * \see 
zip_iterator + */ +template +class zip_function +{ + public: + __host__ __device__ + zip_function(Function func) : func(std::move(func)) {} + +// Add workaround for decltype(auto) on C++11-only compilers: +#if THRUST_CPP_DIALECT >= 2014 + + template + __host__ __device__ + decltype(auto) operator()(Tuple&& args) const + { + return detail::zip_detail::apply(func, THRUST_FWD(args)); + } + +#else // THRUST_CPP_DIALECT + + // Can't just use THRUST_DECLTYPE_RETURNS here since we need to use + // std::declval for the signature components: + template + __host__ __device__ + auto operator()(Tuple&& args) const + noexcept(noexcept(detail::zip_detail::apply(std::declval(), THRUST_FWD(args)))) + THRUST_TRAILING_RETURN(decltype(detail::zip_detail::apply(std::declval(), THRUST_FWD(args)))) + { + return detail::zip_detail::apply(func, THRUST_FWD(args)); + } + +#endif // THRUST_CPP_DIALECT + + private: + mutable Function func; +}; + +/*! \p make_zip_function creates a \p zip_function from a function object. + * + * \param fun The N-ary function object. + * \return A \p zip_function that takes a N-tuple. + * + * \see zip_function + */ +template +__host__ __device__ +zip_function::type> +make_zip_function(Function&& fun) +{ + using func_t = typename std::decay::type; + return zip_function(THRUST_FWD(fun)); +} + +/*! \} // end function_object_adaptors + */ + +/*! \} // end function_objects + */ + +THRUST_NAMESPACE_END + +#endif