Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/common_functions.h +310 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/cudacc_ext.h +64 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_functions.h +0 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_functions.hpp +1197 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/func_macro.h +57 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/mma.hpp +1128 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/nvfunctional +621 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_90_rt.hpp +248 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExt.h +1499 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtCuda.h +170 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtCudaRt.h +146 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtOpenCL.h +220 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtSync.h +411 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImpl.h +469 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCore.h +299 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCudaRt_v3.h +112 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCuda_v3.h +133 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplOpenCL_v3.h +192 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplSync_v3.h +114 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInit.h +343 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInitDecls.h +73 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInitDefs.h +565 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxLinkOnce.h +75 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxTypes.h +333 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/adjacent_difference.h +244 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/allocate_unique.h +443 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/binary_search.h +1899 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/complex.h +1047 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/count.h +231 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/select_system.h +84 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/tuple_meta_transform.h +58 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_free.h +65 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_make_unique.h +60 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_malloc.h +100 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_malloc_allocator.h +180 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_new.h +86 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_new_allocator.h +172 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_reference.h +987 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_vector.h +511 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/equal.h +235 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/event.h +26 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/execution_policy.h +392 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/extrema.h +801 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/fill.h +206 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/for_each.h +278 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/future.h +176 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/generate.h +211 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/inner_product.h +262 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/limits.h +18 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/memory.h +396 -0
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/common_functions.h
ADDED
|
@@ -0,0 +1,310 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 51 |
+
#if defined(_MSC_VER)
|
| 52 |
+
#pragma message("crt/common_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| 53 |
+
#else
|
| 54 |
+
#warning "crt/common_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| 55 |
+
#endif
|
| 56 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 57 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
#if !defined(__COMMON_FUNCTIONS_H__)
|
| 61 |
+
#define __COMMON_FUNCTIONS_H__
|
| 62 |
+
|
| 63 |
+
/*******************************************************************************
|
| 64 |
+
* *
|
| 65 |
+
* *
|
| 66 |
+
* *
|
| 67 |
+
*******************************************************************************/
|
| 68 |
+
|
| 69 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 70 |
+
|
| 71 |
+
#include "builtin_types.h"
|
| 72 |
+
#include "host_defines.h"
|
| 73 |
+
|
| 74 |
+
#define __CUDACC_VER__ "__CUDACC_VER__ is no longer supported. Use __CUDACC_VER_MAJOR__, __CUDACC_VER_MINOR__, and __CUDACC_VER_BUILD__ instead."
|
| 75 |
+
|
| 76 |
+
#ifndef __CUDA_API_VER_MAJOR__
|
| 77 |
+
#define __CUDA_API_VER_MAJOR__ __CUDACC_VER_MAJOR__
|
| 78 |
+
#endif /* __CUDA_API_VER_MAJOR__ */
|
| 79 |
+
|
| 80 |
+
#ifndef __CUDA_API_VER_MINOR__
|
| 81 |
+
#define __CUDA_API_VER_MINOR__ __CUDACC_VER_MINOR__
|
| 82 |
+
#endif /* __CUDA_API_VER_MINOR__ */
|
| 83 |
+
|
| 84 |
+
#if !defined(__CUDACC_RTC__)
|
| 85 |
+
#include <string.h>
|
| 86 |
+
#include <time.h>
|
| 87 |
+
|
| 88 |
+
extern "C"
|
| 89 |
+
{
|
| 90 |
+
#endif /* !__CUDACC_RTC__ */
|
| 91 |
+
extern _CRTIMP __host__ __device__ __device_builtin__ __cudart_builtin__ clock_t __cdecl clock(void)
|
| 92 |
+
#if defined(__QNX__)
|
| 93 |
+
asm("clock32")
|
| 94 |
+
#endif
|
| 95 |
+
__THROW;
|
| 96 |
+
extern __host__ __device__ __device_builtin__ __cudart_builtin__ void* __cdecl memset(void*, int, size_t) __THROW;
|
| 97 |
+
extern __host__ __device__ __device_builtin__ __cudart_builtin__ void* __cdecl memcpy(void*, const void*, size_t) __THROW;
|
| 98 |
+
#if !defined(__CUDACC_RTC__)
|
| 99 |
+
}
|
| 100 |
+
#endif /* !__CUDACC_RTC__ */
|
| 101 |
+
|
| 102 |
+
#if defined(__CUDA_ARCH__)
|
| 103 |
+
|
| 104 |
+
#if defined(__CUDACC_RTC__)
|
| 105 |
+
inline __host__ __device__ void* operator new(size_t, void *p) { return p; }
|
| 106 |
+
inline __host__ __device__ void* operator new[](size_t, void *p) { return p; }
|
| 107 |
+
inline __host__ __device__ void operator delete(void*, void*) { }
|
| 108 |
+
inline __host__ __device__ void operator delete[](void*, void*) { }
|
| 109 |
+
#else /* !__CUDACC_RTC__ */
|
| 110 |
+
#ifndef __CUDA_INTERNAL_SKIP_CPP_HEADERS__
|
| 111 |
+
#include <new>
|
| 112 |
+
#endif
|
| 113 |
+
|
| 114 |
+
#if defined (__GNUC__)
|
| 115 |
+
|
| 116 |
+
#define STD \
|
| 117 |
+
std::
|
| 118 |
+
|
| 119 |
+
#else /* __GNUC__ */
|
| 120 |
+
|
| 121 |
+
#define STD
|
| 122 |
+
|
| 123 |
+
#endif /* __GNUC__ */
|
| 124 |
+
|
| 125 |
+
extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t, void*) throw();
|
| 126 |
+
extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t, void*) throw();
|
| 127 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, void*) throw();
|
| 128 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, void*) throw();
|
| 129 |
+
# if __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__)
|
| 130 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t) throw();
|
| 131 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t) throw();
|
| 132 |
+
#endif /* __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__) */
|
| 133 |
+
#endif /* __CUDACC_RTC__ */
|
| 134 |
+
|
| 135 |
+
#if !defined(__CUDACC_RTC__)
|
| 136 |
+
#include <stdio.h>
|
| 137 |
+
#include <stdlib.h>
|
| 138 |
+
#endif /* !__CUDACC_RTC__ */
|
| 139 |
+
|
| 140 |
+
#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
|
| 141 |
+
namespace std {
|
| 142 |
+
#endif
|
| 143 |
+
extern "C"
|
| 144 |
+
{
|
| 145 |
+
extern
|
| 146 |
+
#if !defined(_MSC_VER) || _MSC_VER < 1900
|
| 147 |
+
_CRTIMP
|
| 148 |
+
#endif
|
| 149 |
+
|
| 150 |
+
#if defined(__GLIBC__) && defined(__GLIBC_MINOR__) && ( (__GLIBC__ < 2) || ( (__GLIBC__ == 2) && (__GLIBC_MINOR__ < 3) ) )
|
| 151 |
+
__host__ __device__ __device_builtin__ __cudart_builtin__ int __cdecl printf(const char*, ...) __THROW;
|
| 152 |
+
#else /* newer glibc */
|
| 153 |
+
__host__ __device__ __device_builtin__ __cudart_builtin__ int __cdecl printf(const char*, ...);
|
| 154 |
+
#endif /* defined(__GLIBC__) && defined(__GLIBC_MINOR__) && ( (__GLIBC__ < 2) || ( (__GLIBC__ == 2) && (__GLIBC_MINOR__ < 3) ) ) */
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
extern _CRTIMP __host__ __device__ __cudart_builtin__ void* __cdecl malloc(size_t) __THROW;
|
| 158 |
+
extern _CRTIMP __host__ __device__ __cudart_builtin__ void __cdecl free(void*) __THROW;
|
| 159 |
+
|
| 160 |
+
#if defined(_MSC_VER)
|
| 161 |
+
extern __host__ __device__ __cudart_builtin__ void* __cdecl _alloca(size_t);
|
| 162 |
+
#endif
|
| 163 |
+
|
| 164 |
+
#if defined(__QNX__)
|
| 165 |
+
#undef alloca
|
| 166 |
+
#define alloca(__S) __builtin_alloca(__S)
|
| 167 |
+
#endif
|
| 168 |
+
}
|
| 169 |
+
#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
|
| 170 |
+
} /* std */
|
| 171 |
+
#endif
|
| 172 |
+
|
| 173 |
+
#if !defined(__CUDACC_RTC__)
|
| 174 |
+
#include <assert.h>
|
| 175 |
+
#endif /* !__CUDACC_RTC__ */
|
| 176 |
+
|
| 177 |
+
extern "C"
|
| 178 |
+
{
|
| 179 |
+
#if defined(__CUDACC_RTC__)
|
| 180 |
+
extern __host__ __device__ void __assertfail(const char * __assertion,
|
| 181 |
+
const char *__file,
|
| 182 |
+
unsigned int __line,
|
| 183 |
+
const char *__function,
|
| 184 |
+
size_t charsize);
|
| 185 |
+
#elif defined(__APPLE__)
|
| 186 |
+
#define __builtin_expect(exp,c) (exp)
|
| 187 |
+
extern __host__ __device__ __cudart_builtin__ void __assert_rtn(
|
| 188 |
+
const char *, const char *, int, const char *);
|
| 189 |
+
#elif defined(__ANDROID__)
|
| 190 |
+
extern __host__ __device__ __cudart_builtin__ void __assert2(
|
| 191 |
+
const char *, int, const char *, const char *);
|
| 192 |
+
#elif defined(__QNX__)
|
| 193 |
+
#if !defined(_LIBCPP_VERSION)
|
| 194 |
+
namespace std {
|
| 195 |
+
#endif
|
| 196 |
+
extern __host__ __device__ __cudart_builtin__ void __assert(
|
| 197 |
+
const char *, const char *, unsigned int, const char *);
|
| 198 |
+
#if !defined(_LIBCPP_VERSION)
|
| 199 |
+
}
|
| 200 |
+
#endif
|
| 201 |
+
#elif defined(__HORIZON__)
|
| 202 |
+
extern __host__ __device__ __cudart_builtin__ void __assert_fail(
|
| 203 |
+
const char *, const char *, int, const char *);
|
| 204 |
+
#elif defined(__GNUC__)
|
| 205 |
+
extern __host__ __device__ __cudart_builtin__ void __assert_fail(
|
| 206 |
+
const char *, const char *, unsigned int, const char *)
|
| 207 |
+
__THROW;
|
| 208 |
+
#elif defined(_WIN32)
|
| 209 |
+
extern __host__ __device__ __cudart_builtin__ _CRTIMP void __cdecl _wassert(
|
| 210 |
+
const wchar_t *, const wchar_t *, unsigned);
|
| 211 |
+
#endif
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
#if defined(__CUDACC_RTC__)
|
| 215 |
+
#ifdef NDEBUG
|
| 216 |
+
#define assert(e) (static_cast<void>(0))
|
| 217 |
+
#else /* !NDEBUG */
|
| 218 |
+
#define __ASSERT_STR_HELPER(x) #x
|
| 219 |
+
#define assert(e) ((e) ? static_cast<void>(0)\
|
| 220 |
+
: __assertfail(__ASSERT_STR_HELPER(e), __FILE__,\
|
| 221 |
+
__LINE__, __PRETTY_FUNCTION__,\
|
| 222 |
+
sizeof(char)))
|
| 223 |
+
#endif /* NDEBUG */
|
| 224 |
+
__host__ __device__ void* operator new(size_t);
|
| 225 |
+
__host__ __device__ void* operator new[](size_t);
|
| 226 |
+
__host__ __device__ void operator delete(void*);
|
| 227 |
+
__host__ __device__ void operator delete[](void*);
|
| 228 |
+
# if __cplusplus >= 201402L
|
| 229 |
+
__host__ __device__ void operator delete(void*, size_t);
|
| 230 |
+
__host__ __device__ void operator delete[](void*, size_t);
|
| 231 |
+
#endif /* __cplusplus >= 201402L */
|
| 232 |
+
|
| 233 |
+
#if __cplusplus >= 201703L
|
| 234 |
+
namespace std { enum class align_val_t : size_t {}; }
|
| 235 |
+
__host__ __device__ void* __cdecl operator new(size_t sz, std::align_val_t) noexcept;
|
| 236 |
+
__host__ __device__ void* __cdecl operator new[](size_t sz, std::align_val_t) noexcept;
|
| 237 |
+
__host__ __device__ void __cdecl operator delete(void* ptr, std::align_val_t) noexcept;
|
| 238 |
+
__host__ __device__ void __cdecl operator delete[](void* ptr, std::align_val_t) noexcept;
|
| 239 |
+
__host__ __device__ void __cdecl operator delete(void* ptr, size_t, std::align_val_t) noexcept;
|
| 240 |
+
__host__ __device__ void __cdecl operator delete[](void* ptr, size_t, std::align_val_t) noexcept;
|
| 241 |
+
#endif /* __cplusplus >= 201703L */
|
| 242 |
+
|
| 243 |
+
#else /* !__CUDACC_RTC__ */
|
| 244 |
+
#if defined (__GNUC__)
|
| 245 |
+
|
| 246 |
+
#define __NV_GLIBCXX_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
|
| 247 |
+
|
| 248 |
+
#if (__cplusplus >= 201103L) && ((!(defined(__QNX__) && defined(_LIBCPP_VERSION))) || (defined(__QNX__) && __NV_GLIBCXX_VERSION >= 80300))
|
| 249 |
+
#define THROWBADALLOC
|
| 250 |
+
#else
|
| 251 |
+
#if defined(__ANDROID__) && !defined(_LIBCPP_VERSION) && (defined(__BIONIC__) || __NV_GLIBCXX_VERSION < 40900)
|
| 252 |
+
#define THROWBADALLOC
|
| 253 |
+
#else
|
| 254 |
+
#define THROWBADALLOC throw(STD bad_alloc)
|
| 255 |
+
#endif
|
| 256 |
+
#endif
|
| 257 |
+
#define __DELETE_THROW throw()
|
| 258 |
+
|
| 259 |
+
#undef __NV_GLIBCXX_VERSION
|
| 260 |
+
|
| 261 |
+
#else /* __GNUC__ */
|
| 262 |
+
|
| 263 |
+
#define THROWBADALLOC throw(...)
|
| 264 |
+
|
| 265 |
+
#endif /* __GNUC__ */
|
| 266 |
+
|
| 267 |
+
extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t) THROWBADALLOC;
|
| 268 |
+
extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t) THROWBADALLOC;
|
| 269 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*) throw();
|
| 270 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*) throw();
|
| 271 |
+
# if __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__)
|
| 272 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t) throw();
|
| 273 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t) throw();
|
| 274 |
+
#endif /* __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__) */
|
| 275 |
+
|
| 276 |
+
#if __cpp_aligned_new
|
| 277 |
+
extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t, std::align_val_t);
|
| 278 |
+
extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t, std::align_val_t);
|
| 279 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, std::align_val_t) noexcept;
|
| 280 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, std::align_val_t) noexcept;
|
| 281 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t, std::align_val_t) noexcept;
|
| 282 |
+
extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t, std::align_val_t) noexcept;
|
| 283 |
+
#endif /* __cpp_aligned_new */
|
| 284 |
+
|
| 285 |
+
#undef THROWBADALLOC
|
| 286 |
+
#undef STD
|
| 287 |
+
#endif /* __CUDACC_RTC__ */
|
| 288 |
+
|
| 289 |
+
#endif /* __CUDA_ARCH__ */
|
| 290 |
+
|
| 291 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 292 |
+
|
| 293 |
+
/*******************************************************************************
|
| 294 |
+
* *
|
| 295 |
+
* *
|
| 296 |
+
* *
|
| 297 |
+
*******************************************************************************/
|
| 298 |
+
|
| 299 |
+
#if defined(__CUDACC_RTC__) && (__CUDA_ARCH__ >= 350)
|
| 300 |
+
#include "cuda_device_runtime_api.h"
|
| 301 |
+
#endif
|
| 302 |
+
|
| 303 |
+
#include "math_functions.h"
|
| 304 |
+
|
| 305 |
+
#endif /* !__COMMON_FUNCTIONS_H__ */
|
| 306 |
+
|
| 307 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__)
|
| 308 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 309 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__
|
| 310 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/cudacc_ext.h
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2021-2021 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 51 |
+
#if defined(_MSC_VER)
|
| 52 |
+
#pragma message("crt/cudacc_ext.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| 53 |
+
#else
|
| 54 |
+
#warning "crt/cudacc_ext.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| 55 |
+
#endif
|
| 56 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 57 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__)
|
| 62 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 63 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__
|
| 64 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_functions.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/device_functions.hpp
ADDED
|
@@ -0,0 +1,1197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 51 |
+
#if defined(_MSC_VER)
|
| 52 |
+
#pragma message("crt/device_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| 53 |
+
#else
|
| 54 |
+
#warning "crt/device_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| 55 |
+
#endif
|
| 56 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 57 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
#if !defined(__DEVICE_FUNCTIONS_HPP__)
|
| 61 |
+
#define __DEVICE_FUNCTIONS_HPP__
|
| 62 |
+
|
| 63 |
+
/*******************************************************************************
|
| 64 |
+
* *
|
| 65 |
+
* *
|
| 66 |
+
* *
|
| 67 |
+
*******************************************************************************/
|
| 68 |
+
|
| 69 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 70 |
+
|
| 71 |
+
#if defined(__CUDACC_RTC__)
|
| 72 |
+
#define __DEVICE_FUNCTIONS_DECL__ __device__
|
| 73 |
+
#define __DEVICE_FUNCTIONS_STATIC_DECL__ __device__
|
| 74 |
+
#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ __device__ __host__ __cudart_builtin__
|
| 75 |
+
#else
|
| 76 |
+
#define __DEVICE_FUNCTIONS_DECL__ __device__
|
| 77 |
+
#define __DEVICE_FUNCTIONS_STATIC_DECL__ static __inline__ __device__
|
| 78 |
+
#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ static __inline__ __device__ __host__ __cudart_builtin__
|
| 79 |
+
#endif /* __CUDACC_RTC__ */
|
| 80 |
+
|
| 81 |
+
#include "builtin_types.h"
|
| 82 |
+
#include "device_types.h"
|
| 83 |
+
#include "host_defines.h"
|
| 84 |
+
|
| 85 |
+
#undef __DEVICE_FUNCTIONS_DECL__
|
| 86 |
+
#undef __DEVICE_FUNCTIONS_STATIC_DECL__
|
| 87 |
+
|
| 88 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 89 |
+
|
| 90 |
+
/*******************************************************************************
|
| 91 |
+
* *
|
| 92 |
+
* *
|
| 93 |
+
* *
|
| 94 |
+
*******************************************************************************/
|
| 95 |
+
|
| 96 |
+
#ifdef __CUDACC__
|
| 97 |
+
# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900)
|
| 98 |
+
#define __CUDA_AND_AT_LEAST_SM_90__
|
| 99 |
+
#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) */
|
| 100 |
+
# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
|
| 101 |
+
#define __CUDA_AND_AT_LEAST_SM_70__
|
| 102 |
+
#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) */
|
| 103 |
+
# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750)
|
| 104 |
+
#define __CUDA_AND_AT_LEAST_SM_75__
|
| 105 |
+
#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) */
|
| 106 |
+
#endif /* __CUDACC__ */
|
| 107 |
+
|
| 108 |
+
// Signed 32-bit max with relu: returns max(a, b) clamped below at zero.
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax_s32_relu(const int a, const int b){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // SM 9.0+ provides a fused max-with-relu instruction.
    int res;
    asm("{max.s32.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b));
    return res;
#else
    // Host / pre-SM-90 fallback: take the larger value, then clamp at 0.
    const int larger = max(a, b);
    return (larger < 0) ? 0 : larger;
#endif
}
|
| 120 |
+
|
| 121 |
+
// Per-halfword signed max with relu: each 16-bit lane of the result is
// max(lane(a), lane(b)) clamped below at zero.
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax_s16x2_relu(const unsigned int a, const unsigned int b){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm("{max.s16x2.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b));
#elif defined(__CUDA_ARCH__)
    // Older devices: SIMD signed max, then SIMD max against 0 for the relu.
    res = __vmaxs2(__vmaxs2(a, b), 0U);
#else
    // Host fallback: process the low and high 16-bit lanes independently.
    unsigned short loA = (unsigned short)(a & 0xFFFFU);
    unsigned short hiA = (unsigned short)(a >> 16);
    unsigned short loB = (unsigned short)(b & 0xFFFFU);
    unsigned short hiB = (unsigned short)(b >> 16);

    // Reinterpret each lane as signed so the comparison honours sign bits.
    short sLoA = *(short*)& loA;
    short sHiA = *(short*)& hiA;
    short sLoB = *(short*)& loB;
    short sHiB = *(short*)& hiB;

    // Lane-wise maximum followed by the relu clamp.
    short sLo = (short)max(sLoA, sLoB);
    short sHi = (short)max(sHiA, sHiB);
    if(sLo < 0){ sLo = 0; }
    if(sHi < 0){ sHi = 0; }

    // Repack the two lanes into one 32-bit word.
    unsigned short uLo = *(unsigned short*)& sLo;
    unsigned short uHi = *(unsigned short*)& sHi;
    res = ((unsigned int) uLo) | (((unsigned int) uHi) << 16);
#endif

    return res;
}
|
| 161 |
+
|
| 162 |
+
// Signed 32-bit min with relu: returns min(a, b) clamped below at zero.
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin_s32_relu(const int a, const int b){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // SM 9.0+ provides a fused min-with-relu instruction.
    int res;
    asm("{min.s32.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b));
    return res;
#else
    // Host / pre-SM-90 fallback: take the smaller value, then clamp at 0.
    const int smaller = min(a, b);
    return (smaller < 0) ? 0 : smaller;
#endif
}
|
| 174 |
+
|
| 175 |
+
// Per-halfword signed min with relu: each 16-bit lane of the result is
// min(lane(a), lane(b)) clamped below at zero.
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin_s16x2_relu(const unsigned int a, const unsigned int b){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm("{min.s16x2.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b));
#elif defined(__CUDA_ARCH__)
    // Older devices: SIMD signed min, then SIMD max against 0 for the relu.
    res = __vmaxs2(__vmins2(a, b), 0U);
#else
    // Host fallback: process the low and high 16-bit lanes independently.
    unsigned short loA = (unsigned short)(a & 0xFFFFU);
    unsigned short hiA = (unsigned short)(a >> 16);
    unsigned short loB = (unsigned short)(b & 0xFFFFU);
    unsigned short hiB = (unsigned short)(b >> 16);

    // Reinterpret each lane as signed so the comparison honours sign bits.
    short sLoA = *(short*)& loA;
    short sHiA = *(short*)& hiA;
    short sLoB = *(short*)& loB;
    short sHiB = *(short*)& hiB;

    // Lane-wise minimum followed by the relu clamp.
    short sLo = (short)min(sLoA, sLoB);
    short sHi = (short)min(sHiA, sHiB);
    if(sLo < 0){ sLo = 0; }
    if(sHi < 0){ sHi = 0; }

    // Repack the two lanes into one 32-bit word.
    unsigned short uLo = *(unsigned short*)& sLo;
    unsigned short uHi = *(unsigned short*)& sHi;
    res = ((unsigned int) uLo) | (((unsigned int) uHi) << 16);
#endif

    return res;
}
|
| 215 |
+
|
| 216 |
+
// Three-way signed 32-bit maximum: returns max(a, b, c).
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax3_s32(const int a, const int b, const int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // Two chained max.s32 instructions through a scratch register.
    int res;
    asm ("{.reg .s32 t1; \n\t"
         "max.s32 t1, %1, %2; \n\t"
         "max.s32 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host / pre-SM-90 fallback.
    return max(max(a, b), c);
#endif
}
|
| 229 |
+
|
| 230 |
+
// Per-halfword three-way signed maximum: each 16-bit lane of the result is
// max(lane(a), lane(b), lane(c)).
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // Chained max.s16x2 instructions (naming/syntax may change in future PTX).
    asm ("{.reg .b32 t1; \n\t"
         "max.s16x2 t1, %1, %2; \n\t"
         "max.s16x2 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_AND_AT_LEAST_SM_70__)
    // SM 7.0+ devices: chain the SIMD signed-max intrinsic.
    res = __vmaxs2(__vmaxs2(a, b), c);
#else
    // Host / older-device fallback: evaluate each 16-bit lane separately.
    unsigned short loA = (unsigned short)(a & 0xFFFFU);
    unsigned short hiA = (unsigned short)(a >> 16);
    unsigned short loB = (unsigned short)(b & 0xFFFFU);
    unsigned short hiB = (unsigned short)(b >> 16);
    unsigned short loC = (unsigned short)(c & 0xFFFFU);
    unsigned short hiC = (unsigned short)(c >> 16);

    // Reinterpret the lanes as signed 16-bit values.
    short sLoA = *(short*)& loA;
    short sHiA = *(short*)& hiA;
    short sLoB = *(short*)& loB;
    short sHiB = *(short*)& hiB;
    short sLoC = *(short*)& loC;
    short sHiC = *(short*)& hiC;

    // Three-way lane-wise maximum.
    short sLo = (short)max(max(sLoA, sLoB), sLoC);
    short sHi = (short)max(max(sHiA, sHiB), sHiC);

    // Repack the two lanes into one 32-bit word.
    unsigned short uLo = *(unsigned short*)& sLo;
    unsigned short uHi = *(unsigned short*)& sHi;
    res = ((unsigned int) uLo) | (((unsigned int) uHi) << 16);
#endif
    return res;
}
|
| 275 |
+
|
| 276 |
+
// Three-way unsigned 32-bit maximum: returns max(a, b, c).
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u32(const unsigned int a, const unsigned int b, const unsigned int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // Fixed: result register declared as unsigned int to match the function's
    // return type and the unsigned max.u32 operands (the sibling
    // __viaddmax_u32 already does this); previously declared as int.
    unsigned int res;
    asm ("{.reg .u32 t1; \n\t"
         "max.u32 t1, %1, %2; \n\t"
         "max.u32 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host / pre-SM-90 fallback.
    return max(max(a, b), c);
#endif
}
|
| 289 |
+
|
| 290 |
+
// Per-halfword three-way unsigned maximum: each 16-bit lane of the result is
// max(lane(a), lane(b), lane(c)).
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm ("{.reg .b32 t1; \n\t"
         "max.u16x2 t1, %1, %2; \n\t"
         "max.u16x2 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_ARCH__)
    // Older devices: chain the SIMD unsigned-max intrinsic.
    res = __vmaxu2(__vmaxu2(a, b), c);
#else
    // Host fallback: evaluate each 16-bit lane separately (no sign handling
    // needed for the unsigned variant).
    unsigned short loA = (unsigned short)(a & 0xFFFFU);
    unsigned short hiA = (unsigned short)(a >> 16);
    unsigned short loB = (unsigned short)(b & 0xFFFFU);
    unsigned short hiB = (unsigned short)(b >> 16);
    unsigned short loC = (unsigned short)(c & 0xFFFFU);
    unsigned short hiC = (unsigned short)(c >> 16);

    // Three-way lane-wise maximum.
    unsigned short uLo = (unsigned short)max(max(loA, loB), loC);
    unsigned short uHi = (unsigned short)max(max(hiA, hiB), hiC);

    // Repack the two lanes into one 32-bit word.
    res = ((unsigned int) uLo) | (((unsigned int) uHi) << 16);
#endif

    return res;
}
|
| 321 |
+
|
| 322 |
+
// Three-way signed 32-bit minimum: returns min(a, b, c).
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32(const int a, const int b, const int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // Two chained min.s32 instructions through a scratch register.
    int res;
    asm ("{.reg .s32 t1; \n\t"
         "min.s32 t1, %1, %2; \n\t"
         "min.s32 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host / pre-SM-90 fallback.
    return min(min(a, b), c);
#endif
}
|
| 335 |
+
|
| 336 |
+
// Per-halfword three-way signed minimum: each 16-bit lane of the result is
// min(lane(a), lane(b), lane(c)).
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm ("{.reg .b32 t1; \n\t"
         "min.s16x2 t1, %1, %2; \n\t"
         "min.s16x2 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_AND_AT_LEAST_SM_70__)
    // SM 7.0+ devices: chain the SIMD signed-min intrinsic.
    res = __vmins2(__vmins2(a, b), c);
#else
    // Host / older-device fallback: evaluate each 16-bit lane separately.
    unsigned short loA = (unsigned short)(a & 0xFFFFU);
    unsigned short hiA = (unsigned short)(a >> 16);
    unsigned short loB = (unsigned short)(b & 0xFFFFU);
    unsigned short hiB = (unsigned short)(b >> 16);
    unsigned short loC = (unsigned short)(c & 0xFFFFU);
    unsigned short hiC = (unsigned short)(c >> 16);

    // Reinterpret the lanes as signed 16-bit values.
    short sLoA = *(short*)& loA;
    short sHiA = *(short*)& hiA;
    short sLoB = *(short*)& loB;
    short sHiB = *(short*)& hiB;
    short sLoC = *(short*)& loC;
    short sHiC = *(short*)& hiC;

    // Three-way lane-wise minimum.
    short sLo = (short)min(min(sLoA, sLoB), sLoC);
    short sHi = (short)min(min(sHiA, sHiB), sHiC);

    // Repack the two lanes into one 32-bit word.
    unsigned short uLo = *(unsigned short*)& sLo;
    unsigned short uHi = *(unsigned short*)& sHi;
    res = ((unsigned int) uLo) | (((unsigned int) uHi) << 16);
#endif

    return res;
}
|
| 381 |
+
|
| 382 |
+
// Three-way unsigned 32-bit minimum: returns min(a, b, c).
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u32(const unsigned int a, const unsigned int b, const unsigned int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // Fixed: result register declared as unsigned int to match the function's
    // return type and the unsigned min.u32 operands (the sibling
    // __viaddmax_u32 already does this); previously declared as int.
    unsigned int res;
    asm ("{.reg .u32 t1; \n\t"
         "min.u32 t1, %1, %2; \n\t"
         "min.u32 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host / pre-SM-90 fallback.
    return min(min(a, b), c);
#endif
}
|
| 395 |
+
|
| 396 |
+
// Per-halfword three-way unsigned minimum: each 16-bit lane of the result is
// min(lane(a), lane(b), lane(c)).
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm ("{.reg .b32 t1; \n\t"
         "min.u16x2 t1, %1, %2; \n\t"
         "min.u16x2 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_ARCH__)
    // Older devices: chain the SIMD unsigned-min intrinsic.
    res = __vminu2(__vminu2(a, b), c);
#else
    // Host fallback: evaluate each 16-bit lane separately (no sign handling
    // needed for the unsigned variant).
    unsigned short loA = (unsigned short)(a & 0xFFFFU);
    unsigned short hiA = (unsigned short)(a >> 16);
    unsigned short loB = (unsigned short)(b & 0xFFFFU);
    unsigned short hiB = (unsigned short)(b >> 16);
    unsigned short loC = (unsigned short)(c & 0xFFFFU);
    unsigned short hiC = (unsigned short)(c >> 16);

    // Three-way lane-wise minimum.
    unsigned short uLo = (unsigned short)min(min(loA, loB), loC);
    unsigned short uHi = (unsigned short)min(min(hiA, hiB), hiC);

    // Repack the two lanes into one 32-bit word.
    res = ((unsigned int) uLo) | (((unsigned int) uHi) << 16);
#endif

    return res;
}
|
| 427 |
+
|
| 428 |
+
// Three-way signed 32-bit max with relu: returns max(a, b, c) clamped below
// at zero.
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax3_s32_relu(const int a, const int b, const int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // Chained fused max-with-relu instructions through a scratch register.
    int res;
    asm ("{.reg .s32 t1; \n\t"
         "max.s32.relu t1, %1, %2; \n\t"
         "max.s32.relu %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host / pre-SM-90 fallback: three-way max, then clamp at 0.
    const int largest = max(max(a, b), c);
    return (largest < 0) ? 0 : largest;
#endif
}
|
| 443 |
+
|
| 444 |
+
// Per-halfword three-way signed max with relu: each 16-bit lane of the result
// is max(lane(a), lane(b), lane(c)) clamped below at zero.
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm ("{.reg .b32 t1; \n\t"
         "max.s16x2.relu t1, %1, %2; \n\t"
         "max.s16x2.relu %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_AND_AT_LEAST_SM_75__)
    // SM 7.5+ devices: SIMD max of a and b, then the two-input max+relu
    // emulation against c.
    res = __vimax_s16x2_relu(__vmaxs2(a, b), c);
#else
    // Host / older-device fallback: evaluate each 16-bit lane separately.
    unsigned short loA = (unsigned short)(a & 0xFFFFU);
    unsigned short hiA = (unsigned short)(a >> 16);
    unsigned short loB = (unsigned short)(b & 0xFFFFU);
    unsigned short hiB = (unsigned short)(b >> 16);
    unsigned short loC = (unsigned short)(c & 0xFFFFU);
    unsigned short hiC = (unsigned short)(c >> 16);

    // Reinterpret the lanes as signed 16-bit values.
    short sLoA = *(short*)& loA;
    short sHiA = *(short*)& hiA;
    short sLoB = *(short*)& loB;
    short sHiB = *(short*)& hiB;
    short sLoC = *(short*)& loC;
    short sHiC = *(short*)& hiC;

    // Three-way lane-wise maximum followed by the relu clamp.
    short sLo = (short)max(max(sLoA, sLoB), sLoC);
    short sHi = (short)max(max(sHiA, sHiB), sHiC);
    if(sLo < 0){ sLo = 0; }
    if(sHi < 0){ sHi = 0; }

    // Repack the two lanes into one 32-bit word.
    unsigned short uLo = *(unsigned short*)& sLo;
    unsigned short uHi = *(unsigned short*)& sHi;
    res = ((unsigned int) uLo) | (((unsigned int) uHi) << 16);
#endif

    return res;
}
|
| 493 |
+
|
| 494 |
+
// Three-way signed 32-bit min with relu: returns min(a, b, c) clamped below
// at zero.
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32_relu(const int a, const int b, const int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // Chained fused min-with-relu instructions through a scratch register.
    int res;
    asm ("{.reg .s32 t1; \n\t"
         "min.s32.relu t1, %1, %2; \n\t"
         "min.s32.relu %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host / pre-SM-90 fallback: three-way min, then clamp at 0.
    const int smallest = min(min(a, b), c);
    return (smallest < 0) ? 0 : smallest;
#endif
}
|
| 509 |
+
|
| 510 |
+
// Per-halfword three-way signed min with relu: each 16-bit lane of the result
// is min(lane(a), lane(b), lane(c)) clamped below at zero.
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm ("{.reg .b32 t1; \n\t"
         "min.s16x2.relu t1, %1, %2; \n\t"
         "min.s16x2.relu %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_AND_AT_LEAST_SM_75__)
    // SM 7.5+ devices: SIMD min of a and b, then the two-input min+relu
    // emulation against c.
    res = __vimin_s16x2_relu(__vmins2(a, b), c);
#else
    // Host / older-device fallback: evaluate each 16-bit lane separately.
    unsigned short loA = (unsigned short)(a & 0xFFFFU);
    unsigned short hiA = (unsigned short)(a >> 16);
    unsigned short loB = (unsigned short)(b & 0xFFFFU);
    unsigned short hiB = (unsigned short)(b >> 16);
    unsigned short loC = (unsigned short)(c & 0xFFFFU);
    unsigned short hiC = (unsigned short)(c >> 16);

    // Reinterpret the lanes as signed 16-bit values.
    short sLoA = *(short*)& loA;
    short sHiA = *(short*)& hiA;
    short sLoB = *(short*)& loB;
    short sHiB = *(short*)& hiB;
    short sLoC = *(short*)& loC;
    short sHiC = *(short*)& hiC;

    // Three-way lane-wise minimum followed by the relu clamp.
    short sLo = (short)min(min(sLoA, sLoB), sLoC);
    short sHi = (short)min(min(sHiA, sHiB), sHiC);
    if(sLo < 0){ sLo = 0; }
    if(sHi < 0){ sHi = 0; }

    // Repack the two lanes into one 32-bit word.
    unsigned short uLo = *(unsigned short*)& sLo;
    unsigned short uHi = *(unsigned short*)& sHi;
    res = ((unsigned int) uLo) | (((unsigned int) uHi) << 16);
#endif

    return res;
}
|
| 559 |
+
|
| 560 |
+
// Fused signed 32-bit add-then-max: returns max(a + b, c).
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32(const int a, const int b, const int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // add.s32 followed by max.s32 through a scratch register.
    int res;
    asm ("{.reg .s32 t1; \n\t"
         "add.s32 t1, %1, %2; \n\t"
         "max.s32 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host / pre-SM-90 fallback.
    return max(a + b, c);
#endif
}
|
| 573 |
+
|
| 574 |
+
// Per-halfword fused signed add-then-max: each 16-bit lane of the result is
// max(lane(a) + lane(b), lane(c)).
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm ("{.reg .b32 t1; \n\t"
         "add.s16x2 t1, %1, %2; \n\t"
         "max.s16x2 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_ARCH__)
    // Older devices: SIMD per-halfword add, then SIMD signed max.
    res = __vmaxs2(__vadd2(a, b), c);
#else
    // Host fallback: evaluate each 16-bit lane separately.
    unsigned short loA = (unsigned short)(a & 0xFFFFU);
    unsigned short hiA = (unsigned short)(a >> 16);
    unsigned short loB = (unsigned short)(b & 0xFFFFU);
    unsigned short hiB = (unsigned short)(b >> 16);
    unsigned short loC = (unsigned short)(c & 0xFFFFU);
    unsigned short hiC = (unsigned short)(c >> 16);

    // Reinterpret the lanes as signed 16-bit values.
    short sLoA = *(short*)& loA;
    short sHiA = *(short*)& hiA;
    short sLoB = *(short*)& loB;
    short sHiB = *(short*)& hiB;
    short sLoC = *(short*)& loC;
    short sHiC = *(short*)& hiC;

    // Lane-wise add (truncated back to 16 bits) followed by max.
    short sLo = (short)max((short)(sLoA + sLoB), sLoC);
    short sHi = (short)max((short)(sHiA + sHiB), sHiC);

    // Repack the two lanes into one 32-bit word.
    unsigned short uLo = *(unsigned short*)& sLo;
    unsigned short uHi = *(unsigned short*)& sHi;
    res = ((unsigned int) uLo) | (((unsigned int) uHi) << 16);
#endif

    return res;
}
|
| 619 |
+
|
| 620 |
+
// Fused unsigned 32-bit add-then-max: returns max(a + b, c).
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u32(const unsigned int a, const unsigned int b, const unsigned int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // add.u32 followed by max.u32 through a scratch register.
    unsigned int res;
    asm ("{.reg .u32 t1; \n\t"
         "add.u32 t1, %1, %2; \n\t"
         "max.u32 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host / pre-SM-90 fallback (unsigned add wraps modulo 2^32).
    return max(a + b, c);
#endif
}
|
| 633 |
+
|
| 634 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){
|
| 635 |
+
unsigned int res;
|
| 636 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 637 |
+
asm ("{.reg .b32 t1; \n\t"
|
| 638 |
+
"add.u16x2 t1, %1, %2; \n\t"
|
| 639 |
+
"max.u16x2 %0, t1, %3;}\n\t"
|
| 640 |
+
: "=r"(res) : "r"(a), "r"(b), "r"(c));
|
| 641 |
+
#elif defined(__CUDA_ARCH__)
|
| 642 |
+
res = __vmaxu2(__vadd2(a, b), c);
|
| 643 |
+
#else
|
| 644 |
+
// Host and older architecture code
|
| 645 |
+
// Separate our high and low bit:
|
| 646 |
+
unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
|
| 647 |
+
unsigned short aU_hi = (unsigned short)(a >> 16);
|
| 648 |
+
|
| 649 |
+
unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
|
| 650 |
+
unsigned short bU_hi = (unsigned short)(b >> 16);
|
| 651 |
+
|
| 652 |
+
unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
|
| 653 |
+
unsigned short cU_hi = (unsigned short)(c >> 16);
|
| 654 |
+
|
| 655 |
+
// Get answer
|
| 656 |
+
unsigned short ansU_lo = (unsigned short)max((unsigned short)(aU_lo + bU_lo), cU_lo);
|
| 657 |
+
unsigned short ansU_hi = (unsigned short)max((unsigned short)(aU_hi + bU_hi), cU_hi);
|
| 658 |
+
|
| 659 |
+
// Put answer back together:
|
| 660 |
+
res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
|
| 661 |
+
#endif
|
| 662 |
+
|
| 663 |
+
return res;
|
| 664 |
+
}
|
| 665 |
+
|
| 666 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32(const int a, const int b, const int c){
|
| 667 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 668 |
+
int res;
|
| 669 |
+
asm ("{.reg .s32 t1; \n\t"
|
| 670 |
+
"add.s32 t1, %1, %2; \n\t"
|
| 671 |
+
"min.s32 %0, t1, %3;}\n\t"
|
| 672 |
+
: "=r"(res) : "r"(a), "r"(b), "r"(c));
|
| 673 |
+
return res;
|
| 674 |
+
#else
|
| 675 |
+
// Host and older architecture code
|
| 676 |
+
return min(a + b, c);
|
| 677 |
+
#endif
|
| 678 |
+
}
|
| 679 |
+
|
| 680 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){
|
| 681 |
+
unsigned int res;
|
| 682 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 683 |
+
asm ("{.reg .b32 t1; \n\t"
|
| 684 |
+
"add.s16x2 t1, %1, %2; \n\t"
|
| 685 |
+
"min.s16x2 %0, t1, %3;}\n\t"
|
| 686 |
+
: "=r"(res) : "r"(a), "r"(b), "r"(c));
|
| 687 |
+
#elif defined(__CUDA_ARCH__)
|
| 688 |
+
res = __vmins2(__vadd2(a, b), c);
|
| 689 |
+
#else
|
| 690 |
+
// Host and older architecture code
|
| 691 |
+
// Separate our high and low bit:
|
| 692 |
+
unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
|
| 693 |
+
unsigned short aU_hi = (unsigned short)(a >> 16);
|
| 694 |
+
|
| 695 |
+
unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
|
| 696 |
+
unsigned short bU_hi = (unsigned short)(b >> 16);
|
| 697 |
+
|
| 698 |
+
unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
|
| 699 |
+
unsigned short cU_hi = (unsigned short)(c >> 16);
|
| 700 |
+
|
| 701 |
+
//cast to signed:
|
| 702 |
+
short aS_lo = *(short*)& aU_lo;
|
| 703 |
+
short aS_hi = *(short*)& aU_hi;
|
| 704 |
+
|
| 705 |
+
short bS_lo = *(short*)& bU_lo;
|
| 706 |
+
short bS_hi = *(short*)& bU_hi;
|
| 707 |
+
|
| 708 |
+
short cS_lo = *(short*)& cU_lo;
|
| 709 |
+
short cS_hi = *(short*)& cU_hi;
|
| 710 |
+
|
| 711 |
+
// Get answer
|
| 712 |
+
short ansS_lo = (short)min((short)(aS_lo + bS_lo), cS_lo);
|
| 713 |
+
short ansS_hi = (short)min((short)(aS_hi + bS_hi), cS_hi);
|
| 714 |
+
|
| 715 |
+
// Cast back to unsigned:
|
| 716 |
+
unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
|
| 717 |
+
unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
|
| 718 |
+
|
| 719 |
+
// Put answer back together:
|
| 720 |
+
res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
|
| 721 |
+
#endif
|
| 722 |
+
|
| 723 |
+
return res;
|
| 724 |
+
}
|
| 725 |
+
|
| 726 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u32(const unsigned int a, const unsigned int b, const unsigned int c){
|
| 727 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 728 |
+
unsigned int res;
|
| 729 |
+
asm ("{.reg .u32 t1; \n\t"
|
| 730 |
+
"add.u32 t1, %1, %2; \n\t"
|
| 731 |
+
"min.u32 %0, t1, %3;}\n\t"
|
| 732 |
+
: "=r"(res) : "r"(a), "r"(b), "r"(c));
|
| 733 |
+
return res;
|
| 734 |
+
#else
|
| 735 |
+
// Host and older architecture code
|
| 736 |
+
return min(a + b, c);
|
| 737 |
+
#endif
|
| 738 |
+
}
|
| 739 |
+
|
| 740 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){
|
| 741 |
+
unsigned int res;
|
| 742 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 743 |
+
asm ("{.reg .b32 t1; \n\t"
|
| 744 |
+
"add.u16x2 t1, %1, %2; \n\t"
|
| 745 |
+
"min.u16x2 %0, t1, %3;}\n\t"
|
| 746 |
+
: "=r"(res) : "r"(a), "r"(b), "r"(c));
|
| 747 |
+
#elif defined(__CUDA_ARCH__)
|
| 748 |
+
res = __vminu2(__vadd2(a, b), c);
|
| 749 |
+
#else
|
| 750 |
+
// Host and older architecture code
|
| 751 |
+
// Separate our high and low bit:
|
| 752 |
+
unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
|
| 753 |
+
unsigned short aU_hi = (unsigned short)(a >> 16);
|
| 754 |
+
|
| 755 |
+
unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
|
| 756 |
+
unsigned short bU_hi = (unsigned short)(b >> 16);
|
| 757 |
+
|
| 758 |
+
unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
|
| 759 |
+
unsigned short cU_hi = (unsigned short)(c >> 16);
|
| 760 |
+
|
| 761 |
+
// Get answer
|
| 762 |
+
unsigned short ansU_lo = (unsigned short)min((unsigned short)(aU_lo + bU_lo), cU_lo);
|
| 763 |
+
unsigned short ansU_hi = (unsigned short)min((unsigned short)(aU_hi + bU_hi), cU_hi);
|
| 764 |
+
|
| 765 |
+
// Put answer back together:
|
| 766 |
+
res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
|
| 767 |
+
#endif
|
| 768 |
+
|
| 769 |
+
return res;
|
| 770 |
+
}
|
| 771 |
+
|
| 772 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32_relu(const int a, const int b, const int c){
|
| 773 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 774 |
+
int res;
|
| 775 |
+
asm ("{.reg .s32 t1; \n\t"
|
| 776 |
+
"add.s32 t1, %1, %2; \n\t"
|
| 777 |
+
"max.s32.relu %0, t1, %3;}\n\t"
|
| 778 |
+
: "=r"(res) : "r"(a), "r"(b), "r"(c));
|
| 779 |
+
return res;
|
| 780 |
+
#else
|
| 781 |
+
// Host and older architecture code
|
| 782 |
+
int ans = max(a + b, c);
|
| 783 |
+
|
| 784 |
+
return (ans > 0) ? ans : 0;
|
| 785 |
+
#endif
|
| 786 |
+
}
|
| 787 |
+
|
| 788 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){
|
| 789 |
+
unsigned int res;
|
| 790 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 791 |
+
asm ("{.reg .b32 t1; \n\t"
|
| 792 |
+
"add.s16x2 t1, %1, %2; \n\t"
|
| 793 |
+
"max.s16x2.relu %0, t1, %3;}\n\t"
|
| 794 |
+
: "=r"(res) : "r"(a), "r"(b), "r"(c));
|
| 795 |
+
#elif defined(__CUDA_ARCH__)
|
| 796 |
+
res = __vimax_s16x2_relu(__vadd2(a, b), c);
|
| 797 |
+
#else
|
| 798 |
+
// Host and older architecture code
|
| 799 |
+
// Separate our high and low bit:
|
| 800 |
+
unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
|
| 801 |
+
unsigned short aU_hi = (unsigned short)(a >> 16);
|
| 802 |
+
|
| 803 |
+
unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
|
| 804 |
+
unsigned short bU_hi = (unsigned short)(b >> 16);
|
| 805 |
+
|
| 806 |
+
unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
|
| 807 |
+
unsigned short cU_hi = (unsigned short)(c >> 16);
|
| 808 |
+
|
| 809 |
+
//cast to signed:
|
| 810 |
+
short aS_lo = *(short*)& aU_lo;
|
| 811 |
+
short aS_hi = *(short*)& aU_hi;
|
| 812 |
+
|
| 813 |
+
short bS_lo = *(short*)& bU_lo;
|
| 814 |
+
short bS_hi = *(short*)& bU_hi;
|
| 815 |
+
|
| 816 |
+
short cS_lo = *(short*)& cU_lo;
|
| 817 |
+
short cS_hi = *(short*)& cU_hi;
|
| 818 |
+
|
| 819 |
+
// Get answer
|
| 820 |
+
short ansS_lo = (short)max((short)(aS_lo + bS_lo), cS_lo);
|
| 821 |
+
short ansS_hi = (short)max((short)(aS_hi + bS_hi), cS_hi);
|
| 822 |
+
|
| 823 |
+
if(ansS_lo < 0){ansS_lo = 0;}
|
| 824 |
+
if(ansS_hi < 0){ansS_hi = 0;}
|
| 825 |
+
|
| 826 |
+
// Cast back to unsigned:
|
| 827 |
+
unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
|
| 828 |
+
unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
|
| 829 |
+
|
| 830 |
+
// Put answer back together:
|
| 831 |
+
res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
|
| 832 |
+
#endif
|
| 833 |
+
|
| 834 |
+
return res;
|
| 835 |
+
}
|
| 836 |
+
|
| 837 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32_relu(const int a, const int b, const int c){
|
| 838 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 839 |
+
int res;
|
| 840 |
+
asm ("{.reg .s32 t1; \n\t"
|
| 841 |
+
"add.s32 t1, %1, %2; \n\t"
|
| 842 |
+
"min.s32.relu %0, t1, %3;}\n\t"
|
| 843 |
+
: "=r"(res) : "r"(a), "r"(b), "r"(c));
|
| 844 |
+
return res;
|
| 845 |
+
#else
|
| 846 |
+
// Host and older architecture code
|
| 847 |
+
int ans = min(a + b, c);
|
| 848 |
+
|
| 849 |
+
return (ans > 0) ? ans : 0;
|
| 850 |
+
#endif
|
| 851 |
+
}
|
| 852 |
+
|
| 853 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){
|
| 854 |
+
unsigned int res;
|
| 855 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 856 |
+
asm ("{.reg .b32 t1; \n\t"
|
| 857 |
+
"add.s16x2 t1, %1, %2; \n\t"
|
| 858 |
+
"min.s16x2.relu %0, t1, %3;}\n\t"
|
| 859 |
+
: "=r"(res) : "r"(a), "r"(b), "r"(c));
|
| 860 |
+
#elif defined(__CUDA_ARCH__)
|
| 861 |
+
res = __vimin_s16x2_relu(__vadd2(a, b), c);
|
| 862 |
+
#else
|
| 863 |
+
// Host and older architecture code
|
| 864 |
+
// Separate our high and low bit:
|
| 865 |
+
unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
|
| 866 |
+
unsigned short aU_hi = (unsigned short)(a >> 16);
|
| 867 |
+
|
| 868 |
+
unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
|
| 869 |
+
unsigned short bU_hi = (unsigned short)(b >> 16);
|
| 870 |
+
|
| 871 |
+
unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
|
| 872 |
+
unsigned short cU_hi = (unsigned short)(c >> 16);
|
| 873 |
+
|
| 874 |
+
//cast to signed:
|
| 875 |
+
short aS_lo = *(short*)& aU_lo;
|
| 876 |
+
short aS_hi = *(short*)& aU_hi;
|
| 877 |
+
|
| 878 |
+
short bS_lo = *(short*)& bU_lo;
|
| 879 |
+
short bS_hi = *(short*)& bU_hi;
|
| 880 |
+
|
| 881 |
+
short cS_lo = *(short*)& cU_lo;
|
| 882 |
+
short cS_hi = *(short*)& cU_hi;
|
| 883 |
+
|
| 884 |
+
// Get answer
|
| 885 |
+
short ansS_lo = (short)min((short)(aS_lo + bS_lo), cS_lo);
|
| 886 |
+
short ansS_hi = (short)min((short)(aS_hi + bS_hi), cS_hi);
|
| 887 |
+
|
| 888 |
+
if(ansS_lo < 0){ansS_lo = 0;}
|
| 889 |
+
if(ansS_hi < 0){ansS_hi = 0;}
|
| 890 |
+
|
| 891 |
+
// Cast back to unsigned:
|
| 892 |
+
unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
|
| 893 |
+
unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
|
| 894 |
+
|
| 895 |
+
// Put answer back together:
|
| 896 |
+
res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
|
| 897 |
+
#endif
|
| 898 |
+
|
| 899 |
+
return res;
|
| 900 |
+
}
|
| 901 |
+
|
| 902 |
+
// vimax vimin with predicate
|
| 903 |
+
// *pred gets set to '(a >= b)'
|
| 904 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmax_s32(const int a, const int b, bool* const pred){
|
| 905 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 906 |
+
int val;
|
| 907 |
+
unsigned int predicate_local;
|
| 908 |
+
asm ("{ .reg .pred __$temp1;\n\t"
|
| 909 |
+
" setp.ge.s32 __$temp1, %2, %3;\n\t"
|
| 910 |
+
" selp.s32 %0, %2, %3, __$temp1;\n\t"
|
| 911 |
+
" selp.s32 %1, 1, 0, __$temp1;}\n\t"
|
| 912 |
+
: "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b));
|
| 913 |
+
|
| 914 |
+
*pred = (bool)predicate_local;
|
| 915 |
+
return val;
|
| 916 |
+
#else
|
| 917 |
+
// Host and older architecture code
|
| 918 |
+
int ans = max(a, b);
|
| 919 |
+
|
| 920 |
+
*pred = (a >= b);
|
| 921 |
+
return ans;
|
| 922 |
+
#endif
|
| 923 |
+
}
|
| 924 |
+
|
| 925 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u32(const unsigned int a, const unsigned int b, bool* const pred){
|
| 926 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 927 |
+
unsigned int val;
|
| 928 |
+
unsigned int predicate_local;
|
| 929 |
+
asm ("{ .reg .pred __$temp1;\n\t"
|
| 930 |
+
" setp.ge.u32 __$temp1, %2, %3;\n\t"
|
| 931 |
+
" selp.u32 %0, %2, %3, __$temp1;\n\t"
|
| 932 |
+
" selp.u32 %1, 1, 0, __$temp1;}\n\t"
|
| 933 |
+
: "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b));
|
| 934 |
+
|
| 935 |
+
*pred = (bool)predicate_local;
|
| 936 |
+
return val;
|
| 937 |
+
#else
|
| 938 |
+
// Host and older architecture code
|
| 939 |
+
unsigned int ans = max(a, b);
|
| 940 |
+
|
| 941 |
+
*pred = (a >= b);
|
| 942 |
+
return ans;
|
| 943 |
+
#endif
|
| 944 |
+
}
|
| 945 |
+
|
| 946 |
+
// *pred gets set to '(a <= b)'
|
| 947 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmin_s32(const int a, const int b, bool* const pred){
|
| 948 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 949 |
+
int val;
|
| 950 |
+
unsigned int predicate_local;
|
| 951 |
+
asm ("{ .reg .pred __$temp1;\n\t"
|
| 952 |
+
" setp.le.s32 __$temp1, %2, %3;\n\t"
|
| 953 |
+
" selp.s32 %0, %2, %3, __$temp1;\n\t"
|
| 954 |
+
" selp.s32 %1, 1, 0, __$temp1;}\n\t"
|
| 955 |
+
: "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b));
|
| 956 |
+
|
| 957 |
+
*pred = (bool)predicate_local;
|
| 958 |
+
return val;
|
| 959 |
+
#else
|
| 960 |
+
// Host and older architecture code
|
| 961 |
+
int ans = min(a, b);
|
| 962 |
+
|
| 963 |
+
*pred = (a <= b);
|
| 964 |
+
return ans;
|
| 965 |
+
#endif
|
| 966 |
+
}
|
| 967 |
+
|
| 968 |
+
// *pred gets set to '(a <= b)'
|
| 969 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u32(const unsigned int a, const unsigned int b, bool* const pred){
|
| 970 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 971 |
+
unsigned int val;
|
| 972 |
+
unsigned int predicate_local;
|
| 973 |
+
asm ("{ .reg .pred __$temp1;\n\t"
|
| 974 |
+
" setp.le.u32 __$temp1, %2, %3;\n\t"
|
| 975 |
+
" selp.u32 %0, %2, %3, __$temp1;\n\t"
|
| 976 |
+
" selp.u32 %1, 1, 0, __$temp1;}\n\t"
|
| 977 |
+
: "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b));
|
| 978 |
+
|
| 979 |
+
*pred = (bool)predicate_local;
|
| 980 |
+
return val;
|
| 981 |
+
#else
|
| 982 |
+
// Host and older architecture code
|
| 983 |
+
unsigned int ans = min(a, b);
|
| 984 |
+
|
| 985 |
+
*pred = (a <= b);
|
| 986 |
+
return ans;
|
| 987 |
+
#endif
|
| 988 |
+
}
|
| 989 |
+
|
| 990 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){
|
| 991 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 992 |
+
unsigned int val;
|
| 993 |
+
unsigned int predicate_local_hi;
|
| 994 |
+
unsigned int predicate_local_lo;
|
| 995 |
+
asm ("{.reg .pred pu, pv; \n\t"
|
| 996 |
+
".reg .s16 rs0, rs1, rs2, rs3; \n\t"
|
| 997 |
+
"max.s16x2 %0, %3, %4; \n\t"
|
| 998 |
+
"mov.b32 {rs0, rs1}, %0; \n\t"
|
| 999 |
+
"mov.b32 {rs2, rs3}, %3; \n\t"
|
| 1000 |
+
"setp.eq.s16 pv, rs0, rs2; \n\t"
|
| 1001 |
+
"setp.eq.s16 pu, rs1, rs3; \n\t"
|
| 1002 |
+
"selp.b32 %1, 1, 0, pu; \n\t"
|
| 1003 |
+
"selp.b32 %2, 1, 0, pv;} \n\t"
|
| 1004 |
+
: "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b));
|
| 1005 |
+
|
| 1006 |
+
*pred_hi = (bool)predicate_local_hi;
|
| 1007 |
+
*pred_lo = (bool)predicate_local_lo;
|
| 1008 |
+
return val;
|
| 1009 |
+
#else
|
| 1010 |
+
// Host and older architecture code
|
| 1011 |
+
// Separate our high and low bit:
|
| 1012 |
+
unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
|
| 1013 |
+
unsigned short aU_hi = (unsigned short)(a >> 16);
|
| 1014 |
+
|
| 1015 |
+
unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
|
| 1016 |
+
unsigned short bU_hi = (unsigned short)(b >> 16);
|
| 1017 |
+
|
| 1018 |
+
//cast to signed:
|
| 1019 |
+
short aS_lo = *(short*)& aU_lo;
|
| 1020 |
+
short aS_hi = *(short*)& aU_hi;
|
| 1021 |
+
|
| 1022 |
+
short bS_lo = *(short*)& bU_lo;
|
| 1023 |
+
short bS_hi = *(short*)& bU_hi;
|
| 1024 |
+
|
| 1025 |
+
// Get answer
|
| 1026 |
+
short ansS_lo = (short)max(aS_lo, bS_lo);
|
| 1027 |
+
short ansS_hi = (short)max(aS_hi, bS_hi);
|
| 1028 |
+
|
| 1029 |
+
*pred_hi = (aS_hi >= bS_hi);
|
| 1030 |
+
*pred_lo = (aS_lo >= bS_lo);
|
| 1031 |
+
|
| 1032 |
+
// Cast back to unsigned:
|
| 1033 |
+
unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
|
| 1034 |
+
unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
|
| 1035 |
+
|
| 1036 |
+
// Put answer back together:
|
| 1037 |
+
unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
|
| 1038 |
+
|
| 1039 |
+
return ans;
|
| 1040 |
+
#endif
|
| 1041 |
+
}
|
| 1042 |
+
|
| 1043 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){
|
| 1044 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 1045 |
+
unsigned int val;
|
| 1046 |
+
unsigned int predicate_local_hi;
|
| 1047 |
+
unsigned int predicate_local_lo;
|
| 1048 |
+
asm ("{.reg .pred pu, pv; \n\t"
|
| 1049 |
+
".reg .u16 rs0, rs1, rs2, rs3; \n\t"
|
| 1050 |
+
"max.u16x2 %0, %3, %4; \n\t"
|
| 1051 |
+
"mov.b32 {rs0, rs1}, %0; \n\t"
|
| 1052 |
+
"mov.b32 {rs2, rs3}, %3; \n\t"
|
| 1053 |
+
"setp.eq.u16 pv, rs0, rs2; \n\t"
|
| 1054 |
+
"setp.eq.u16 pu, rs1, rs3; \n\t"
|
| 1055 |
+
"selp.b32 %1, 1, 0, pu; \n\t"
|
| 1056 |
+
"selp.b32 %2, 1, 0, pv;} \n\t"
|
| 1057 |
+
: "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b));
|
| 1058 |
+
|
| 1059 |
+
*pred_hi = (bool)predicate_local_hi;
|
| 1060 |
+
*pred_lo = (bool)predicate_local_lo;
|
| 1061 |
+
return val;
|
| 1062 |
+
#else
|
| 1063 |
+
// Host and older architecture code
|
| 1064 |
+
// Separate our high and low bit:
|
| 1065 |
+
unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
|
| 1066 |
+
unsigned short aU_hi = (unsigned short)(a >> 16);
|
| 1067 |
+
|
| 1068 |
+
unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
|
| 1069 |
+
unsigned short bU_hi = (unsigned short)(b >> 16);
|
| 1070 |
+
|
| 1071 |
+
// Get answer
|
| 1072 |
+
unsigned short ansU_lo = (unsigned short)max(aU_lo, bU_lo);
|
| 1073 |
+
unsigned short ansU_hi = (unsigned short)max(aU_hi, bU_hi);
|
| 1074 |
+
|
| 1075 |
+
*pred_hi = (aU_hi >= bU_hi);
|
| 1076 |
+
*pred_lo = (aU_lo >= bU_lo);
|
| 1077 |
+
|
| 1078 |
+
// Put answer back together:
|
| 1079 |
+
unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
|
| 1080 |
+
|
| 1081 |
+
return ans;
|
| 1082 |
+
#endif
|
| 1083 |
+
}
|
| 1084 |
+
|
| 1085 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){
|
| 1086 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 1087 |
+
unsigned int val;
|
| 1088 |
+
unsigned int predicate_local_hi;
|
| 1089 |
+
unsigned int predicate_local_lo;
|
| 1090 |
+
asm ("{.reg .pred pu, pv; \n\t"
|
| 1091 |
+
".reg .u16 rs0, rs1, rs2, rs3; \n\t"
|
| 1092 |
+
"min.s16x2 %0, %3, %4; \n\t"
|
| 1093 |
+
"mov.b32 {rs0, rs1}, %0; \n\t"
|
| 1094 |
+
"mov.b32 {rs2, rs3}, %3; \n\t"
|
| 1095 |
+
"setp.eq.s16 pv, rs0, rs2; \n\t"
|
| 1096 |
+
"setp.eq.s16 pu, rs1, rs3; \n\t"
|
| 1097 |
+
"selp.b32 %1, 1, 0, pu; \n\t"
|
| 1098 |
+
"selp.b32 %2, 1, 0, pv;} \n\t"
|
| 1099 |
+
: "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b));
|
| 1100 |
+
|
| 1101 |
+
*pred_hi = (bool)predicate_local_hi;
|
| 1102 |
+
*pred_lo = (bool)predicate_local_lo;
|
| 1103 |
+
return val;
|
| 1104 |
+
#else
|
| 1105 |
+
// Host and older architecture code
|
| 1106 |
+
// Separate our high and low bit:
|
| 1107 |
+
unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
|
| 1108 |
+
unsigned short aU_hi = (unsigned short)(a >> 16);
|
| 1109 |
+
|
| 1110 |
+
unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
|
| 1111 |
+
unsigned short bU_hi = (unsigned short)(b >> 16);
|
| 1112 |
+
|
| 1113 |
+
//cast to signed:
|
| 1114 |
+
short aS_lo = *(short*)& aU_lo;
|
| 1115 |
+
short aS_hi = *(short*)& aU_hi;
|
| 1116 |
+
|
| 1117 |
+
short bS_lo = *(short*)& bU_lo;
|
| 1118 |
+
short bS_hi = *(short*)& bU_hi;
|
| 1119 |
+
|
| 1120 |
+
// Get answer
|
| 1121 |
+
short ansS_lo = (short)min(aS_lo, bS_lo);
|
| 1122 |
+
short ansS_hi = (short)min(aS_hi, bS_hi);
|
| 1123 |
+
|
| 1124 |
+
*pred_hi = (aS_hi <= bS_hi);
|
| 1125 |
+
*pred_lo = (aS_lo <= bS_lo);
|
| 1126 |
+
|
| 1127 |
+
// Cast back to unsigned:
|
| 1128 |
+
unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
|
| 1129 |
+
unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
|
| 1130 |
+
|
| 1131 |
+
// Put answer back together:
|
| 1132 |
+
unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
|
| 1133 |
+
|
| 1134 |
+
return ans;
|
| 1135 |
+
#endif
|
| 1136 |
+
}
|
| 1137 |
+
|
| 1138 |
+
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){
|
| 1139 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 1140 |
+
unsigned int val;
|
| 1141 |
+
unsigned int predicate_local_hi;
|
| 1142 |
+
unsigned int predicate_local_lo;
|
| 1143 |
+
asm ("{.reg .pred pu, pv; \n\t"
|
| 1144 |
+
".reg .u16 rs0, rs1, rs2, rs3; \n\t"
|
| 1145 |
+
"min.u16x2 %0, %3, %4; \n\t"
|
| 1146 |
+
"mov.b32 {rs0, rs1}, %0; \n\t"
|
| 1147 |
+
"mov.b32 {rs2, rs3}, %3; \n\t"
|
| 1148 |
+
"setp.eq.u16 pv, rs0, rs2; \n\t"
|
| 1149 |
+
"setp.eq.u16 pu, rs1, rs3; \n\t"
|
| 1150 |
+
"selp.b32 %1, 1, 0, pu; \n\t"
|
| 1151 |
+
"selp.b32 %2, 1, 0, pv;} \n\t"
|
| 1152 |
+
: "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b));
|
| 1153 |
+
|
| 1154 |
+
*pred_hi = (bool)predicate_local_hi;
|
| 1155 |
+
*pred_lo = (bool)predicate_local_lo;
|
| 1156 |
+
return val;
|
| 1157 |
+
#else
|
| 1158 |
+
// Host and older architecture code
|
| 1159 |
+
// Separate our high and low bit:
|
| 1160 |
+
unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
|
| 1161 |
+
unsigned short aU_hi = (unsigned short)(a >> 16);
|
| 1162 |
+
|
| 1163 |
+
unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
|
| 1164 |
+
unsigned short bU_hi = (unsigned short)(b >> 16);
|
| 1165 |
+
|
| 1166 |
+
// Get answer
|
| 1167 |
+
unsigned short ansU_lo = (unsigned short)min(aU_lo, bU_lo);
|
| 1168 |
+
unsigned short ansU_hi = (unsigned short)min(aU_hi, bU_hi);
|
| 1169 |
+
|
| 1170 |
+
*pred_hi = (aU_hi <= bU_hi);
|
| 1171 |
+
*pred_lo = (aU_lo <= bU_lo);
|
| 1172 |
+
|
| 1173 |
+
// Put answer back together:
|
| 1174 |
+
unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
|
| 1175 |
+
|
| 1176 |
+
return ans;
|
| 1177 |
+
#endif
|
| 1178 |
+
}
|
| 1179 |
+
|
| 1180 |
+
#ifdef __CUDA_AND_AT_LEAST_SM_90__
|
| 1181 |
+
#undef __CUDA_AND_AT_LEAST_SM_90__
|
| 1182 |
+
#endif
|
| 1183 |
+
|
| 1184 |
+
#undef __DEVICE_HOST_FUNCTIONS_STATIC_DECL__
|
| 1185 |
+
|
| 1186 |
+
/*******************************************************************************
|
| 1187 |
+
* *
|
| 1188 |
+
* *
|
| 1189 |
+
* *
|
| 1190 |
+
*******************************************************************************/
|
| 1191 |
+
|
| 1192 |
+
#endif /* !__DEVICE_FUNCTIONS_HPP__ */
|
| 1193 |
+
|
| 1194 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__)
|
| 1195 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 1196 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__
|
| 1197 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/func_macro.h
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* NVIDIA_COPYRIGHT_BEGIN
|
| 3 |
+
*
|
| 4 |
+
* Copyright (c) 2008-2018, NVIDIA CORPORATION. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* NVIDIA CORPORATION and its licensors retain all intellectual property
|
| 7 |
+
* and proprietary rights in and to this software, related documentation
|
| 8 |
+
* and any modifications thereto. Any use, reproduction, disclosure or
|
| 9 |
+
* distribution of this software and related documentation without an express
|
| 10 |
+
* license agreement from NVIDIA CORPORATION is strictly prohibited.
|
| 11 |
+
*
|
| 12 |
+
* NVIDIA_COPYRIGHT_END
|
| 13 |
+
*/
|
| 14 |
+
|
| 15 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 16 |
+
#if defined(_MSC_VER)
|
| 17 |
+
#pragma message("crt/func_macro.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| 18 |
+
#else
|
| 19 |
+
#warning "crt/func_macro.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| 20 |
+
#endif
|
| 21 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 22 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__
|
| 23 |
+
#endif
|
| 24 |
+
|
| 25 |
+
#if !defined(__FUNC_MACRO_H__)
|
| 26 |
+
#define __FUNC_MACRO_H__
|
| 27 |
+
|
| 28 |
+
#if !defined(__CUDA_INTERNAL_COMPILATION__)
|
| 29 |
+
|
| 30 |
+
#error -- incorrect inclusion of a cudart header file
|
| 31 |
+
|
| 32 |
+
#endif /* !__CUDA_INTERNAL_COMPILATION__ */
|
| 33 |
+
|
| 34 |
+
#if defined(__GNUC__)
|
| 35 |
+
|
| 36 |
+
#define __func__(decl) \
|
| 37 |
+
inline decl
|
| 38 |
+
|
| 39 |
+
#define __device_func__(decl) \
|
| 40 |
+
static __attribute__((__unused__)) decl
|
| 41 |
+
|
| 42 |
+
#elif defined(_WIN32)
|
| 43 |
+
|
| 44 |
+
#define __func__(decl) \
|
| 45 |
+
static inline decl
|
| 46 |
+
|
| 47 |
+
#define __device_func__(decl) \
|
| 48 |
+
static decl
|
| 49 |
+
|
| 50 |
+
#endif /* __GNUC__ */
|
| 51 |
+
|
| 52 |
+
#endif /* __FUNC_MACRO_H__ */
|
| 53 |
+
|
| 54 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__)
|
| 55 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 56 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__
|
| 57 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/mma.hpp
ADDED
|
@@ -0,0 +1,1128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2017-2020 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 51 |
+
#if defined(_MSC_VER)
|
| 52 |
+
#pragma message("crt/mma.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| 53 |
+
#else
|
| 54 |
+
#warning "crt/mma.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| 55 |
+
#endif
|
| 56 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 57 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
#if !defined(__CUDA_MMA_HPP__)
|
| 61 |
+
#define __CUDA_MMA_HPP__
|
| 62 |
+
|
| 63 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 64 |
+
|
| 65 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
|
| 66 |
+
|
| 67 |
+
#include <cuda_fp16.h>
|
| 68 |
+
#include <cuda_bf16.h>
|
| 69 |
+
|
| 70 |
+
#define __CUDA_MMA_DEVICE_DECL__ static __device__ __inline__
|
| 71 |
+
|
| 72 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720
|
| 73 |
+
#define __CUDA_IMMA__ 1
|
| 74 |
+
#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720 */
|
| 75 |
+
|
| 76 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730
|
| 77 |
+
#define __CUDA_SUBBYTE_IMMA__ 1
|
| 78 |
+
#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730 */
|
| 79 |
+
|
| 80 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
|
| 81 |
+
#define __CUDA_AMPERE_MMA__ 1
|
| 82 |
+
#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 */
|
| 83 |
+
|
| 84 |
+
namespace nvcuda {
|
| 85 |
+
namespace wmma {
|
| 86 |
+
|
| 87 |
+
//
|
| 88 |
+
// Load functions for frags of shape m16n16k16
|
| 89 |
+
//
|
| 90 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
|
| 91 |
+
__hmma_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 0);
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
|
| 95 |
+
__hmma_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 1);
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b,16, 16, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
|
| 99 |
+
__hmma_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 0);
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b,16, 16, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
|
| 103 |
+
__hmma_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 1);
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator,16, 16, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) {
|
| 107 |
+
if (layout == mem_row_major)
|
| 108 |
+
__hmma_m16n16k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0);
|
| 109 |
+
else
|
| 110 |
+
__hmma_m16n16k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1);
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator,16, 16, 16, float>& a, const float* p, unsigned ldm, layout_t layout) {
|
| 114 |
+
if (layout == mem_row_major)
|
| 115 |
+
__hmma_m16n16k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0);
|
| 116 |
+
else
|
| 117 |
+
__hmma_m16n16k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1);
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
#ifdef __CUDA_IMMA__
// Integer (s8/u8) loaders for the m16n16k16 tile shape; available on
// sm_72 and newer (see the __CUDA_IMMA__ guard above).
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
  __imma_m16n16k16_ld_a_s8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
  __imma_m16n16k16_ld_a_s8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m16n16k16_ld_a_u8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m16n16k16_ld_a_u8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
  __imma_m16n16k16_ld_b_s8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
  __imma_m16n16k16_ld_b_s8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m16n16k16_ld_b_u8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m16n16k16_ld_b_u8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

// int accumulator: layout is chosen at run time via the selector argument.
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator,16, 16, 16, int>& a, const int* p, unsigned ldm, layout_t layout) {
  __imma_m16n16k16_ld_c((int*)&a, (const int*)p, ldm, (layout == mem_row_major) ? 0 : 1);
}
#endif /* __CUDA_IMMA__ */
|
| 160 |
+
|
| 161 |
+
#ifdef __CUDA_AMPERE_MMA__
// __nv_bfloat16 loaders for the m16n16k16 tile shape (sm_80+).
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}
#endif /* __CUDA_AMPERE_MMA__ */
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
//
|
| 181 |
+
// Load functions for frags of shape m32n8k16
|
| 182 |
+
//
|
| 183 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
|
| 184 |
+
__hmma_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 0);
|
| 185 |
+
}
|
| 186 |
+
|
| 187 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
|
| 188 |
+
__hmma_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 1);
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
|
| 192 |
+
__hmma_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 0);
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
|
| 196 |
+
__hmma_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 1);
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) {
|
| 200 |
+
if (layout == mem_row_major)
|
| 201 |
+
__hmma_m32n8k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0);
|
| 202 |
+
else
|
| 203 |
+
__hmma_m32n8k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1);
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, float>& a, const float* p, unsigned ldm, layout_t layout) {
|
| 207 |
+
if (layout == mem_row_major)
|
| 208 |
+
__hmma_m32n8k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0);
|
| 209 |
+
else
|
| 210 |
+
__hmma_m32n8k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1);
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
#ifdef __CUDA_IMMA__
// Integer (s8/u8) loaders for the m32n8k16 tile shape.
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
  __imma_m32n8k16_ld_a_s8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
  __imma_m32n8k16_ld_a_s8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m32n8k16_ld_a_u8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m32n8k16_ld_a_u8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
  __imma_m32n8k16_ld_b_s8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
  __imma_m32n8k16_ld_b_s8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m32n8k16_ld_b_u8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m32n8k16_ld_b_u8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, int>& a, const int* p, unsigned ldm, layout_t layout) {
  __imma_m32n8k16_ld_c((int*)&a, (const int*)p, ldm, (layout == mem_row_major) ? 0 : 1);
}
#endif /* __CUDA_IMMA__ */
|
| 252 |
+
|
| 253 |
+
#ifdef __CUDA_AMPERE_MMA__
// __nv_bfloat16 loaders for the m32n8k16 tile shape (sm_80+).
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}
#endif /* __CUDA_AMPERE_MMA__ */
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
//
|
| 273 |
+
// Load functions for frags of shape m8n32k16
|
| 274 |
+
//
|
| 275 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
|
| 276 |
+
__hmma_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 0);
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
|
| 280 |
+
__hmma_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 1);
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
|
| 284 |
+
__hmma_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 0);
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
|
| 288 |
+
__hmma_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 1);
|
| 289 |
+
}
|
| 290 |
+
|
| 291 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) {
|
| 292 |
+
if (layout == mem_row_major)
|
| 293 |
+
__hmma_m8n32k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0);
|
| 294 |
+
else
|
| 295 |
+
__hmma_m8n32k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1);
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, float>& a, const float* p, unsigned ldm, layout_t layout) {
|
| 299 |
+
if (layout == mem_row_major)
|
| 300 |
+
__hmma_m8n32k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0);
|
| 301 |
+
else
|
| 302 |
+
__hmma_m8n32k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1);
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
#ifdef __CUDA_IMMA__
// Integer (s8/u8) loaders for the m8n32k16 tile shape.
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
  __imma_m8n32k16_ld_a_s8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
  __imma_m8n32k16_ld_a_s8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m8n32k16_ld_a_u8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m8n32k16_ld_a_u8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
  __imma_m8n32k16_ld_b_s8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
  __imma_m8n32k16_ld_b_s8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m8n32k16_ld_b_u8((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m8n32k16_ld_b_u8((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, int>& a, const int* p, unsigned ldm, layout_t layout) {
  __imma_m8n32k16_ld_c((int*)&a, (const int*)p, ldm, (layout == mem_row_major) ? 0 : 1);
}
#endif /* __CUDA_IMMA__ */
|
| 344 |
+
|
| 345 |
+
#ifdef __CUDA_AMPERE_MMA__
// __nv_bfloat16 loaders for the m8n32k16 tile shape (sm_80+).
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}
#endif /* __CUDA_AMPERE_MMA__ */
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
#ifdef __CUDA_SUBBYTE_IMMA__
//
// Load functions for frags of shape m8n8k32
//
// Sub-byte (s4/u4) operands are passed as opaque void*; only one layout is
// provided per operand here (row-major for A, col-major for B), matching the
// selector literal handed to the intrinsic.
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 32, experimental::precision::s4, row_major>& a, const void* p, unsigned ldm) {
  __imma_m8n8k32_ld_a_s4((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 32, experimental::precision::u4, row_major>& a, const void* p, unsigned ldm) {
  __imma_m8n8k32_ld_a_u4((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 32, experimental::precision::s4, col_major>& a, const void* p, unsigned ldm) {
  __imma_m8n8k32_ld_b_s4((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 32, experimental::precision::u4, col_major>& a, const void* p, unsigned ldm) {
  __imma_m8n8k32_ld_b_u4((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 32, int>& a, const int* p, unsigned ldm, layout_t layout) {
  __imma_m8n8k32_ld_c((int*)&a, (const int*)p, ldm, (layout == mem_row_major) ? 0 : 1);
}

//
// Load functions for frags of shape m8n8k128
//
// Single-bit (b1) operands, also passed as opaque void*.
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 128, experimental::precision::b1, row_major>& a, const void* p, unsigned ldm) {
  __bmma_m8n8k128_ld_a_b1((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 128, experimental::precision::b1, col_major>& a, const void* p, unsigned ldm) {
  __bmma_m8n8k128_ld_b_b1((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 128, int>& a, const int* p, unsigned ldm, layout_t layout) {
  __bmma_m8n8k128_ld_c((int*)&a, (const int*)p, ldm, (layout == mem_row_major) ? 0 : 1);
}
#endif /* __CUDA_SUBBYTE_IMMA__ */
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
#ifdef __CUDA_AMPERE_MMA__
// load functions for frags of shape m16n16k8
//
// tf32 operands arrive as float pointers; the intrinsic handles the
// tf32 representation internally.
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const float* p, unsigned ldm) {
  __mma_tf32_m16n16k8_ld_a((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const float* p, unsigned ldm) {
  __mma_tf32_m16n16k8_ld_a((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& a, const float* p, unsigned ldm) {
  __mma_tf32_m16n16k8_ld_b((int*)&a, (const int*)p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& a, const float* p, unsigned ldm) {
  __mma_tf32_m16n16k8_ld_b((int*)&a, (const int*)p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 8, float>& a, const float* p, unsigned ldm, layout_t layout) {
  __mma_tf32_m16n16k8_ld_c((float*)&a, p, ldm, (layout == mem_row_major) ? 0 : 1);
}

// load functions for frags of shape m8n8k4
//
// Double-precision fragments (sm_80+).
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 4, double, row_major>& a, const double* p, unsigned ldm) {
  __dmma_m8n8k4_ld_a((double*)&a, p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 4, double, col_major>& a, const double* p, unsigned ldm) {
  __dmma_m8n8k4_ld_a((double*)&a, p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 4, double, row_major>& a, const double* p, unsigned ldm) {
  __dmma_m8n8k4_ld_b((double*)&a, p, ldm, 0 /* row-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 4, double, col_major>& a, const double* p, unsigned ldm) {
  __dmma_m8n8k4_ld_b((double*)&a, p, ldm, 1 /* col-major */);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 4, double>& a, const double* p, unsigned ldm, layout_t layout) {
  __dmma_m8n8k4_ld_c((double*)&a, p, ldm, (layout == mem_row_major) ? 0 : 1);
}
#endif /* __CUDA_AMPERE_MMA__ */
|
| 461 |
+
|
| 462 |
+
//
|
| 463 |
+
// Store functions for frags of shape m16n16k16
|
| 464 |
+
//
|
| 465 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator,16, 16, 16, __half>& a, unsigned ldm, layout_t layout) {
|
| 466 |
+
if (layout == mem_row_major)
|
| 467 |
+
__hmma_m16n16k16_st_c_f16((int*)p, (int*)&a, ldm, 0);
|
| 468 |
+
else
|
| 469 |
+
__hmma_m16n16k16_st_c_f16((int*)p, (int*)&a, ldm, 1);
|
| 470 |
+
}
|
| 471 |
+
|
| 472 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator,16, 16, 16, float>& a, unsigned ldm, layout_t layout) {
|
| 473 |
+
if (layout == mem_row_major)
|
| 474 |
+
__hmma_m16n16k16_st_c_f32((float*)p, (float*)&a, ldm, 0);
|
| 475 |
+
else
|
| 476 |
+
__hmma_m16n16k16_st_c_f32((float*)p, (float*)&a, ldm, 1);
|
| 477 |
+
}
|
| 478 |
+
|
| 479 |
+
#ifdef __CUDA_IMMA__
|
| 480 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator,16, 16, 16, int>& a, unsigned ldm, layout_t layout) {
|
| 481 |
+
if (layout == mem_row_major)
|
| 482 |
+
__imma_m16n16k16_st_c_i32(p, (const int*)&a, ldm, 0);
|
| 483 |
+
else
|
| 484 |
+
__imma_m16n16k16_st_c_i32(p, (const int*)&a, ldm, 1);
|
| 485 |
+
}
|
| 486 |
+
#endif /* __CUDA_IMMA__ */
|
| 487 |
+
|
| 488 |
+
//
|
| 489 |
+
// Store functions for frags of shape m32n8k16
|
| 490 |
+
//
|
| 491 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator, 32, 8, 16, __half>& a, unsigned ldm, layout_t layout) {
|
| 492 |
+
if (layout == mem_row_major)
|
| 493 |
+
__hmma_m32n8k16_st_c_f16((int*)p, (int*)&a, ldm, 0);
|
| 494 |
+
else
|
| 495 |
+
__hmma_m32n8k16_st_c_f16((int*)p, (int*)&a, ldm, 1);
|
| 496 |
+
}
|
| 497 |
+
|
| 498 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 32, 8, 16, float>& a, unsigned ldm, layout_t layout) {
|
| 499 |
+
if (layout == mem_row_major)
|
| 500 |
+
__hmma_m32n8k16_st_c_f32((float*)p, (float*)&a, ldm, 0);
|
| 501 |
+
else
|
| 502 |
+
__hmma_m32n8k16_st_c_f32((float*)p, (float*)&a, ldm, 1);
|
| 503 |
+
}
|
| 504 |
+
|
| 505 |
+
#ifdef __CUDA_IMMA__
|
| 506 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 32, 8, 16, int>& a, unsigned ldm, layout_t layout) {
|
| 507 |
+
if (layout == mem_row_major)
|
| 508 |
+
__imma_m32n8k16_st_c_i32(p, (const int*)&a, ldm, 0);
|
| 509 |
+
else
|
| 510 |
+
__imma_m32n8k16_st_c_i32(p, (const int*)&a, ldm, 1);
|
| 511 |
+
}
|
| 512 |
+
#endif /* __CUDA_IMMA__ */
|
| 513 |
+
|
| 514 |
+
//
|
| 515 |
+
// Store functions for frags of shape m8n32k16
|
| 516 |
+
//
|
| 517 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator, 8, 32, 16, __half>& a, unsigned ldm, layout_t layout) {
|
| 518 |
+
if (layout == mem_row_major)
|
| 519 |
+
__hmma_m8n32k16_st_c_f16((int*)p, (int*)&a, ldm, 0);
|
| 520 |
+
else
|
| 521 |
+
__hmma_m8n32k16_st_c_f16((int*)p, (int*)&a, ldm, 1);
|
| 522 |
+
}
|
| 523 |
+
|
| 524 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 8, 32, 16, float>& a, unsigned ldm, layout_t layout) {
|
| 525 |
+
if (layout == mem_row_major)
|
| 526 |
+
__hmma_m8n32k16_st_c_f32((float*)p, (float*)&a, ldm, 0);
|
| 527 |
+
else
|
| 528 |
+
__hmma_m8n32k16_st_c_f32((float*)p, (float*)&a, ldm, 1);
|
| 529 |
+
}
|
| 530 |
+
|
| 531 |
+
#ifdef __CUDA_IMMA__
|
| 532 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 32, 16, int>& a, unsigned ldm, layout_t layout) {
|
| 533 |
+
if (layout == mem_row_major)
|
| 534 |
+
__imma_m8n32k16_st_c_i32(p, (const int*)&a, ldm, 0);
|
| 535 |
+
else
|
| 536 |
+
__imma_m8n32k16_st_c_i32(p, (const int*)&a, ldm, 1);
|
| 537 |
+
}
|
| 538 |
+
#endif /* __CUDA_IMMA__ */
|
| 539 |
+
|
| 540 |
+
#ifdef __CUDA_SUBBYTE_IMMA__
//
// Store functions for frags of shape m8n8k32
//
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 8, 32, int>& a, unsigned ldm, layout_t layout) {
  __imma_m8n8k32_st_c_i32(p, (const int*)&a, ldm, (layout == mem_row_major) ? 0 : 1);
}

//
// Store functions for frags of shape m8n8k128
//
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 8, 128, int>& a, unsigned ldm, layout_t layout) {
  __bmma_m8n8k128_st_c_i32(p, (const int*)&a, ldm, (layout == mem_row_major) ? 0 : 1);
}
#endif /* __CUDA_SUBBYTE_IMMA__ */
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
#ifdef __CUDA_AMPERE_MMA__

//
// Store functions for frags of shape m16n16k8
//
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 16, 16, 8, float>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __mma_m16n16k8_st_c_f32(p, (const float*)&a, ldm, 0);
  else
    __mma_m16n16k8_st_c_f32(p, (const float*)&a, ldm, 1);
}

//
// Store functions for frags of shape m8n8k4 (fp64)
//
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(double *p, const fragment<accumulator, 8, 8, 4, double>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __dmma_m8n8k4_st_c_f64(p, (const double*)&a, ldm, 0);
  else
    __dmma_m8n8k4_st_c_f64(p, (const double*)&a, ldm, 1);
}
#endif /* __CUDA_AMPERE_MMA__ */
|
| 586 |
+
|
| 587 |
+
//
|
| 588 |
+
// MMA functions for shape m16n16k16
|
| 589 |
+
//
|
| 590 |
+
// D fp16, C fp16
|
| 591 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
|
| 592 |
+
__hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
|
| 593 |
+
}
|
| 594 |
+
|
| 595 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
|
| 596 |
+
__hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
|
| 597 |
+
}
|
| 598 |
+
|
| 599 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
|
| 600 |
+
__hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
|
| 601 |
+
}
|
| 602 |
+
|
| 603 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
|
| 604 |
+
__hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
|
| 605 |
+
}
|
| 606 |
+
|
| 607 |
+
// D fp32, C fp16
|
| 608 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
|
| 609 |
+
__hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
|
| 610 |
+
}
|
| 611 |
+
|
| 612 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
|
| 613 |
+
__hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
|
| 614 |
+
}
|
| 615 |
+
|
| 616 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
|
| 617 |
+
__hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
|
| 618 |
+
}
|
| 619 |
+
|
| 620 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
|
| 621 |
+
__hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
|
| 622 |
+
}
|
| 623 |
+
|
| 624 |
+
// D fp32, C fp32
|
| 625 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
|
| 626 |
+
__hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
|
| 627 |
+
}
|
| 628 |
+
|
| 629 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
|
| 630 |
+
__hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
|
| 631 |
+
}
|
| 632 |
+
|
| 633 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
|
| 634 |
+
__hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
|
| 635 |
+
}
|
| 636 |
+
|
| 637 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
|
| 638 |
+
__hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
|
| 639 |
+
}
|
| 640 |
+
|
| 641 |
+
// D fp16, C fp32
|
| 642 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
|
| 643 |
+
__hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
|
| 644 |
+
}
|
| 645 |
+
|
| 646 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
|
| 647 |
+
__hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
|
| 648 |
+
}
|
| 649 |
+
|
| 650 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
|
| 651 |
+
__hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
|
| 652 |
+
}
|
| 653 |
+
|
| 654 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
|
| 655 |
+
__hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
|
| 656 |
+
}
|
| 657 |
+
|
| 658 |
+
#ifdef __CUDA_IMMA__
// Integer MMA, shape m16n16k16 (s8 then u8 operands).  The 5th intrinsic
// argument is the layout code (0=row/row, 1=row/col, 2=col/row, 3=col/col);
// the 6th enables saturation of the i32 accumulator when 'satf' is true.
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const fragment<matrix_b,16, 16, 16, signed char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 1);
  else
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const fragment<matrix_b,16, 16, 16, signed char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 1);
  else
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const fragment<matrix_b,16, 16, 16, signed char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 1);
  else
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const fragment<matrix_b,16, 16, 16, signed char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 1);
  else
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 1);
  else
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 1);
  else
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 1);
  else
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 1);
  else
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 0);
}
#endif /* __CUDA_IMMA__ */
|
| 714 |
+
|
| 715 |
+
#ifdef __CUDA_AMPERE_MMA__
// bf16 MMA, shape m16n16k16, fp32 accumulate; layout code:
// 0=row/row, 1=row/col, 2=col/row, 3=col/col.
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
}
#endif /* __CUDA_AMPERE_MMA__ */
|
| 732 |
+
|
| 733 |
+
|
| 734 |
+
//
|
| 735 |
+
// MMA functions for shape m32n8k16
|
| 736 |
+
//
|
| 737 |
+
// D fp16, C fp16
|
| 738 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
|
| 739 |
+
__hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
|
| 740 |
+
}
|
| 741 |
+
|
| 742 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
|
| 743 |
+
__hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
|
| 744 |
+
}
|
| 745 |
+
|
| 746 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
|
| 747 |
+
__hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
|
| 748 |
+
}
|
| 749 |
+
|
| 750 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
|
| 751 |
+
__hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
|
| 752 |
+
}
|
| 753 |
+
|
| 754 |
+
// D fp32, C fp16
|
| 755 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
|
| 756 |
+
__hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
|
| 757 |
+
}
|
| 758 |
+
|
| 759 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
|
| 760 |
+
__hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
|
| 761 |
+
}
|
| 762 |
+
|
| 763 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
|
| 764 |
+
__hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
|
| 765 |
+
}
|
| 766 |
+
|
| 767 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
|
| 768 |
+
__hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
|
| 769 |
+
}
|
| 770 |
+
|
| 771 |
+
// D fp32, C fp32
|
| 772 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
|
| 773 |
+
__hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
|
| 774 |
+
}
|
| 775 |
+
|
| 776 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
|
| 777 |
+
__hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
|
| 778 |
+
}
|
| 779 |
+
|
| 780 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
|
| 781 |
+
__hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
|
| 782 |
+
}
|
| 783 |
+
|
| 784 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
|
| 785 |
+
__hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
|
| 786 |
+
}
|
| 787 |
+
|
| 788 |
+
// D fp16, C fp32
|
| 789 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
|
| 790 |
+
__hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
|
| 791 |
+
}
|
| 792 |
+
|
| 793 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
|
| 794 |
+
__hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
|
| 795 |
+
}
|
| 796 |
+
|
| 797 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
|
| 798 |
+
__hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
|
| 799 |
+
}
|
| 800 |
+
|
| 801 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
|
| 802 |
+
__hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
|
| 803 |
+
}
|
| 804 |
+
|
| 805 |
+
#ifdef __CUDA_IMMA__
// Integer MMA, shape m32n8k16 (s8 then u8 operands).  Layout code (5th arg):
// 0=row/row, 1=row/col, 2=col/row, 3=col/col; 6th arg saturates the i32
// accumulator when 'satf' is true.
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
  else
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1);
  else
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1);
  else
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1);
  else
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
  else
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1);
  else
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1);
  else
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1);
  else
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0);
}
#endif /* __CUDA_IMMA__ */
|
| 865 |
+
|
| 866 |
+
#ifdef __CUDA_AMPERE_MMA__
// bf16 MMA, shape m32n8k16, fp32 accumulate; layout code:
// 0=row/row, 1=row/col, 2=col/row, 3=col/col.
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) {
  __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) {
  __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) {
  __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) {
  __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
}
#endif /* __CUDA_AMPERE_MMA__ */
|
| 883 |
+
|
| 884 |
+
//
|
| 885 |
+
// MMA functions for shape m8n32k16
|
| 886 |
+
//
|
| 887 |
+
// D fp16, C fp16
|
| 888 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
|
| 889 |
+
__hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
|
| 890 |
+
}
|
| 891 |
+
|
| 892 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
|
| 893 |
+
__hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
|
| 894 |
+
}
|
| 895 |
+
|
| 896 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
|
| 897 |
+
__hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
|
| 898 |
+
}
|
| 899 |
+
|
| 900 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
|
| 901 |
+
__hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
|
| 902 |
+
}
|
| 903 |
+
|
| 904 |
+
// D fp32, C fp16
|
| 905 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
|
| 906 |
+
__hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
|
| 907 |
+
}
|
| 908 |
+
|
| 909 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
|
| 910 |
+
__hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
|
| 911 |
+
}
|
| 912 |
+
|
| 913 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
|
| 914 |
+
__hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
|
| 915 |
+
}
|
| 916 |
+
|
| 917 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
|
| 918 |
+
__hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
|
| 919 |
+
}
|
| 920 |
+
|
| 921 |
+
// D fp32, C fp32
|
| 922 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
|
| 923 |
+
__hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
|
| 924 |
+
}
|
| 925 |
+
|
| 926 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
|
| 927 |
+
__hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
|
| 928 |
+
}
|
| 929 |
+
|
| 930 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
|
| 931 |
+
__hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
|
| 932 |
+
}
|
| 933 |
+
|
| 934 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
|
| 935 |
+
__hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
|
| 936 |
+
}
|
| 937 |
+
|
| 938 |
+
// D fp16, C fp32
|
| 939 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
|
| 940 |
+
__hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
|
| 941 |
+
}
|
| 942 |
+
|
| 943 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
|
| 944 |
+
__hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
|
| 945 |
+
}
|
| 946 |
+
|
| 947 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
|
| 948 |
+
__hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
|
| 949 |
+
}
|
| 950 |
+
|
| 951 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
|
| 952 |
+
__hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
|
| 953 |
+
}
|
| 954 |
+
|
| 955 |
+
#ifdef __CUDA_IMMA__
|
| 956 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
|
| 957 |
+
if (satf)
|
| 958 |
+
__imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
|
| 959 |
+
else
|
| 960 |
+
__imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
|
| 961 |
+
}
|
| 962 |
+
|
| 963 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
|
| 964 |
+
if (satf)
|
| 965 |
+
__imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1);
|
| 966 |
+
else
|
| 967 |
+
__imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0);
|
| 968 |
+
}
|
| 969 |
+
|
| 970 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
|
| 971 |
+
if (satf)
|
| 972 |
+
__imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1);
|
| 973 |
+
else
|
| 974 |
+
__imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0);
|
| 975 |
+
}
|
| 976 |
+
|
| 977 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
|
| 978 |
+
if (satf)
|
| 979 |
+
__imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1);
|
| 980 |
+
else
|
| 981 |
+
__imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0);
|
| 982 |
+
}
|
| 983 |
+
|
| 984 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
|
| 985 |
+
if (satf)
|
| 986 |
+
__imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
|
| 987 |
+
else
|
| 988 |
+
__imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
|
| 989 |
+
}
|
| 990 |
+
|
| 991 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
|
| 992 |
+
if (satf)
|
| 993 |
+
__imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1);
|
| 994 |
+
else
|
| 995 |
+
__imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0);
|
| 996 |
+
}
|
| 997 |
+
|
| 998 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
|
| 999 |
+
if (satf)
|
| 1000 |
+
__imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1);
|
| 1001 |
+
else
|
| 1002 |
+
__imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0);
|
| 1003 |
+
}
|
| 1004 |
+
|
| 1005 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
|
| 1006 |
+
if (satf)
|
| 1007 |
+
__imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1);
|
| 1008 |
+
else
|
| 1009 |
+
__imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0);
|
| 1010 |
+
}
|
| 1011 |
+
#endif /* __CUDA_IMMA__ */
|
| 1012 |
+
|
| 1013 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 1014 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) {
|
| 1015 |
+
__mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
|
| 1016 |
+
}
|
| 1017 |
+
|
| 1018 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) {
|
| 1019 |
+
__mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
|
| 1020 |
+
}
|
| 1021 |
+
|
| 1022 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) {
|
| 1023 |
+
__mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
|
| 1024 |
+
}
|
| 1025 |
+
|
| 1026 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) {
|
| 1027 |
+
__mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
|
| 1028 |
+
}
|
| 1029 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 1030 |
+
|
| 1031 |
+
|
| 1032 |
+
#ifdef __CUDA_SUBBYTE_IMMA__
|
| 1033 |
+
//
|
| 1034 |
+
// MMA functions for shape m8n8k32
|
| 1035 |
+
//
|
| 1036 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 32, int>& d, const fragment<matrix_a, 8, 8, 32, experimental::precision::s4, row_major>& a, const fragment<matrix_b, 8, 8, 32, experimental::precision::s4, col_major>& b, const fragment<accumulator, 8, 8, 32, int>& c, bool satf) {
|
| 1037 |
+
if (satf)
|
| 1038 |
+
__imma_m8n8k32_mma_s4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
|
| 1039 |
+
else
|
| 1040 |
+
__imma_m8n8k32_mma_s4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
|
| 1041 |
+
}
|
| 1042 |
+
|
| 1043 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 32, int>& d, const fragment<matrix_a, 8, 8, 32, experimental::precision::u4, row_major>& a, const fragment<matrix_b, 8, 8, 32, experimental::precision::u4, col_major>& b, const fragment<accumulator, 8, 8, 32, int>& c, bool satf) {
|
| 1044 |
+
if (satf)
|
| 1045 |
+
__imma_m8n8k32_mma_u4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
|
| 1046 |
+
else
|
| 1047 |
+
__imma_m8n8k32_mma_u4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
|
| 1048 |
+
}
|
| 1049 |
+
|
| 1050 |
+
//
|
| 1051 |
+
// MMA functions for shape m8n8k128
|
| 1052 |
+
//
|
| 1053 |
+
__CUDA_MMA_DEVICE_DECL__ void bmma_sync(fragment<accumulator, 8, 8, 128, int>& d, const fragment<matrix_a, 8, 8, 128, experimental::precision::b1, row_major>& a, const fragment<matrix_b, 8, 8, 128, experimental::precision::b1, col_major>& b, const fragment<accumulator, 8, 8, 128, int>& c,
|
| 1054 |
+
experimental::bmmaBitOp op, experimental::bmmaAccumulateOp)
|
| 1055 |
+
{
|
| 1056 |
+
|
| 1057 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 1058 |
+
if (op == experimental::bmmaBitOpAND)
|
| 1059 |
+
__bmma_m8n8k128_mma_and_popc_b1((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1);
|
| 1060 |
+
else
|
| 1061 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 1062 |
+
__bmma_m8n8k128_mma_xor_popc_b1((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1);
|
| 1063 |
+
}
|
| 1064 |
+
|
| 1065 |
+
|
| 1066 |
+
#endif /* __CUDA_SUBBYTE_IMMA__ */
|
| 1067 |
+
|
| 1068 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 1069 |
+
//
|
| 1070 |
+
// MMA functions for shape m16n16k8
|
| 1071 |
+
//
|
| 1072 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) {
|
| 1073 |
+
__mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
|
| 1074 |
+
}
|
| 1075 |
+
|
| 1076 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) {
|
| 1077 |
+
__mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
|
| 1078 |
+
}
|
| 1079 |
+
|
| 1080 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) {
|
| 1081 |
+
__mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
|
| 1082 |
+
}
|
| 1083 |
+
|
| 1084 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) {
|
| 1085 |
+
__mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
|
| 1086 |
+
}
|
| 1087 |
+
|
| 1088 |
+
|
| 1089 |
+
//
|
| 1090 |
+
// MMA functions for shape m8n8k4
|
| 1091 |
+
//
|
| 1092 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, row_major>& a, const fragment<matrix_b, 8, 8, 4, double, col_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) {
|
| 1093 |
+
__dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 1, 0);
|
| 1094 |
+
}
|
| 1095 |
+
|
| 1096 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, col_major>& a, const fragment<matrix_b, 8, 8, 4, double, col_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) {
|
| 1097 |
+
__dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 3, 0);
|
| 1098 |
+
}
|
| 1099 |
+
|
| 1100 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, row_major>& a, const fragment<matrix_b, 8, 8, 4, double, row_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) {
|
| 1101 |
+
__dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 0, 0);
|
| 1102 |
+
}
|
| 1103 |
+
|
| 1104 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, col_major>& a, const fragment<matrix_b, 8, 8, 4, double, row_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) {
|
| 1105 |
+
__dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 2, 0);
|
| 1106 |
+
}
|
| 1107 |
+
|
| 1108 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 1109 |
+
|
| 1110 |
+
};
|
| 1111 |
+
};
|
| 1112 |
+
|
| 1113 |
+
#undef __CUDA_IMMA__
|
| 1114 |
+
#undef __CUDA_SUBBYTE_IMMA__
|
| 1115 |
+
#undef __CUDA_MMA_DEVICE_DECL__
|
| 1116 |
+
#undef __CUDA_AMPERE_MMA__
|
| 1117 |
+
|
| 1118 |
+
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */
|
| 1119 |
+
|
| 1120 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 1121 |
+
|
| 1122 |
+
|
| 1123 |
+
#endif /* __CUDA_MMA_HPP__ */
|
| 1124 |
+
|
| 1125 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__)
|
| 1126 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 1127 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__
|
| 1128 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/nvfunctional
ADDED
|
@@ -0,0 +1,621 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* NVIDIA_COPYRIGHT_BEGIN
|
| 3 |
+
*
|
| 4 |
+
* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* NVIDIA CORPORATION and its licensors retain all intellectual property
|
| 7 |
+
* and proprietary rights in and to this software, related documentation
|
| 8 |
+
* and any modifications thereto. Any use, reproduction, disclosure or
|
| 9 |
+
* distribution of this software and related documentation without an express
|
| 10 |
+
* license agreement from NVIDIA CORPORATION is strictly prohibited.
|
| 11 |
+
*
|
| 12 |
+
* NVIDIA_COPYRIGHT_END
|
| 13 |
+
*/
|
| 14 |
+
|
| 15 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 16 |
+
#if defined(_MSC_VER)
|
| 17 |
+
#pragma message("crt/nvfunctional is an internal header file and must not be used directly. Please use nvfunctional instead.")
|
| 18 |
+
#else
|
| 19 |
+
#warning "crt/nvfunctional is an internal header file and must not be used directly. Please use nvfunctional instead."
|
| 20 |
+
#endif
|
| 21 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 22 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__
|
| 23 |
+
#endif
|
| 24 |
+
|
| 25 |
+
#ifndef __NV_LIBCXX_FUNCTIONAL_H__
|
| 26 |
+
#define __NV_LIBCXX_FUNCTIONAL_H__
|
| 27 |
+
|
| 28 |
+
#if __cplusplus < 201103L
|
| 29 |
+
#if defined(_MSC_VER)
|
| 30 |
+
#if _MSC_VER < 1800
|
| 31 |
+
#error This library requires VS 2013 and above
|
| 32 |
+
#endif /* _MSC_VER < 1800 */
|
| 33 |
+
#else /* !_MSC_VER */
|
| 34 |
+
#error This library requires support for the ISO C++ 2011 standard
|
| 35 |
+
#endif /* _MSC_VER */
|
| 36 |
+
#endif /* __cplusplus */
|
| 37 |
+
|
| 38 |
+
#if defined(_MSC_VER)
|
| 39 |
+
#define __NV_ALIGNOF __alignof
|
| 40 |
+
#define __NV_NOEXCEPT
|
| 41 |
+
#define __NV_CONSTEXPR
|
| 42 |
+
#else /* !_MSC_VER */
|
| 43 |
+
#define __NV_ALIGNOF alignof
|
| 44 |
+
#define __NV_NOEXCEPT noexcept
|
| 45 |
+
#define __NV_CONSTEXPR constexpr
|
| 46 |
+
#endif /* _MSC_VER */
|
| 47 |
+
|
| 48 |
+
#include <type_traits>
|
| 49 |
+
#include <cstddef>
|
| 50 |
+
#include <new>
|
| 51 |
+
|
| 52 |
+
// n3290 20.8
|
| 53 |
+
namespace nvstd
|
| 54 |
+
{
|
| 55 |
+
|
| 56 |
+
namespace internal {
|
| 57 |
+
|
| 58 |
+
// D.8.1 base (deprecated) [depr.base]
|
| 59 |
+
template <class _Arg, class _Result>
|
| 60 |
+
struct unary_function
|
| 61 |
+
{
|
| 62 |
+
typedef _Arg argument_type;
|
| 63 |
+
typedef _Result result_type;
|
| 64 |
+
};
|
| 65 |
+
|
| 66 |
+
template <class _Arg1, class _Arg2, class _Result>
|
| 67 |
+
struct binary_function
|
| 68 |
+
{
|
| 69 |
+
typedef _Arg1 first_argument_type;
|
| 70 |
+
typedef _Arg2 second_argument_type;
|
| 71 |
+
typedef _Result result_type;
|
| 72 |
+
};
|
| 73 |
+
|
| 74 |
+
// move
|
| 75 |
+
template <class _T>
|
| 76 |
+
inline __device__ __host__
|
| 77 |
+
typename std::remove_reference<_T>::type&& move(_T&& __t) __NV_NOEXCEPT
|
| 78 |
+
{
|
| 79 |
+
return static_cast<typename std::remove_reference<_T>::type&&>(__t);
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
// 20.2.2 swap [utility.swap]
|
| 83 |
+
// swap
|
| 84 |
+
template<class _T,
|
| 85 |
+
class = typename std::enable_if<
|
| 86 |
+
std::is_move_constructible<_T>::value &&
|
| 87 |
+
std::is_move_assignable<_T>::value>::type>
|
| 88 |
+
inline __device__ __host__
|
| 89 |
+
void swap(_T& __a, _T& __b)
|
| 90 |
+
#if !defined(_MSC_VER)
|
| 91 |
+
noexcept(std::is_nothrow_move_constructible<_T>::value &&
|
| 92 |
+
std::is_nothrow_move_assignable<_T>::value)
|
| 93 |
+
#endif /* !defined(_MSC_VER) */
|
| 94 |
+
{
|
| 95 |
+
_T __t(internal::move(__a));
|
| 96 |
+
__a = internal::move(__b);
|
| 97 |
+
__b = internal::move(__t);
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
// 20.2.3 forward/move helpers [forward]
|
| 101 |
+
// forward
|
| 102 |
+
template <class _T>
|
| 103 |
+
inline __device__ __host__
|
| 104 |
+
_T&& forward(typename std::remove_reference<_T>::type& __t) __NV_NOEXCEPT
|
| 105 |
+
{
|
| 106 |
+
return static_cast<_T&&>(__t);
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
template <class _T>
|
| 110 |
+
inline __device__ __host__
|
| 111 |
+
_T&& forward(typename std::remove_reference<_T>::type&& __t) __NV_NOEXCEPT
|
| 112 |
+
{
|
| 113 |
+
static_assert(!std::is_lvalue_reference<_T>::value,
|
| 114 |
+
"Error: __t is instantiated with an lvalue reference type");
|
| 115 |
+
return static_cast<_T&&>(__t);
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
} // namespace internal
|
| 119 |
+
|
| 120 |
+
namespace __functional_helpers
|
| 121 |
+
{
|
| 122 |
+
|
| 123 |
+
struct __dummy_class;
|
| 124 |
+
|
| 125 |
+
// Store small functors locally:
|
| 126 |
+
// a functor is legitimate to local storage if it is one of the following types:
|
| 127 |
+
// * member object pointer;
|
| 128 |
+
// * member function pointer;
|
| 129 |
+
// * closure type of size less than or equal to the largest size of
|
| 130 |
+
// the above types;
|
| 131 |
+
// * function pointer;
|
| 132 |
+
// * any callable class whose size is less than or equal to
|
| 133 |
+
// the largest one of the above types;
|
| 134 |
+
union _Small_functor_types
|
| 135 |
+
{
|
| 136 |
+
void *__obj;
|
| 137 |
+
void (*__func_ptr)();
|
| 138 |
+
void (__dummy_class::*mem_fn_ptr)();
|
| 139 |
+
};
|
| 140 |
+
|
| 141 |
+
struct _Small_functor_data {
|
| 142 |
+
char __data[sizeof(_Small_functor_types)];
|
| 143 |
+
};
|
| 144 |
+
|
| 145 |
+
template <class _RetType, class ..._ArgTypes>
|
| 146 |
+
struct __maybe_base_function
|
| 147 |
+
{ };
|
| 148 |
+
|
| 149 |
+
template <class _RetType, class _T1>
|
| 150 |
+
struct __maybe_base_function<_RetType(_T1)>
|
| 151 |
+
: public internal::unary_function<_T1, _RetType>
|
| 152 |
+
{ };
|
| 153 |
+
|
| 154 |
+
template <class _RetType, class _T1, class _T2>
|
| 155 |
+
struct __maybe_base_function<_RetType(_T1, _T2)>
|
| 156 |
+
: public internal::binary_function<_T1, _T2, _RetType>
|
| 157 |
+
{ };
|
| 158 |
+
|
| 159 |
+
} // namespace __functional_helpers
|
| 160 |
+
|
| 161 |
+
// 20.8.11 Polymorphic function wrappers [func.wrap]
|
| 162 |
+
|
| 163 |
+
// 20.8.11.1 Class bad_function_call [func.wrap.badcall]
|
| 164 |
+
// unimplemented because of exception
|
| 165 |
+
// class bad_function_call : public std::exception
|
| 166 |
+
|
| 167 |
+
// 20.8.11.2 Class template function [func.wrap.func]
|
| 168 |
+
|
| 169 |
+
template<class> class function; // undefined
|
| 170 |
+
|
| 171 |
+
// Simplified version of template class function, which
|
| 172 |
+
// * does not support allocator_arg_t;
|
| 173 |
+
// * does not support target and target_type that rely on RTTI
|
| 174 |
+
// * does not throw bad_function_call exception on invoking a NULL target
|
| 175 |
+
template <class _RetType, class ..._ArgTypes>
|
| 176 |
+
class function<_RetType(_ArgTypes...)>
|
| 177 |
+
: public __functional_helpers::__maybe_base_function<_RetType(_ArgTypes...)>
|
| 178 |
+
{
|
| 179 |
+
__functional_helpers::_Small_functor_data __small_functor_data;
|
| 180 |
+
void *__obj;
|
| 181 |
+
typedef _RetType(*__meta_fn_type)(void *, _ArgTypes...);
|
| 182 |
+
__meta_fn_type __meta_fn;
|
| 183 |
+
typedef void(*__cloner_type)(function &, const function &);
|
| 184 |
+
__cloner_type __cloner;
|
| 185 |
+
typedef void(*__destructor_type)(function *);
|
| 186 |
+
__destructor_type __destructor;
|
| 187 |
+
|
| 188 |
+
#pragma nv_exec_check_disable
|
| 189 |
+
template <class _F>
|
| 190 |
+
__device__ __host__
|
| 191 |
+
__NV_CONSTEXPR bool __use_small_functor_data() const
|
| 192 |
+
{
|
| 193 |
+
return (sizeof(_F) <= sizeof(__small_functor_data) &&
|
| 194 |
+
__NV_ALIGNOF(_F) <= __NV_ALIGNOF(
|
| 195 |
+
__functional_helpers::_Small_functor_types));
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
#pragma nv_exec_check_disable
|
| 199 |
+
__device__ __host__
|
| 200 |
+
void* __get_small_functor_data() const
|
| 201 |
+
{
|
| 202 |
+
return (void*)(&__small_functor_data.__data[0]);
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
#pragma nv_exec_check_disable
|
| 206 |
+
__device__ __host__
|
| 207 |
+
bool __is_small_functor_data() const
|
| 208 |
+
{
|
| 209 |
+
return __obj == __get_small_functor_data();
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
#pragma nv_exec_check_disable
|
| 213 |
+
template <class _F>
|
| 214 |
+
__device__ __host__
|
| 215 |
+
static _F& __get_functor(void *__p)
|
| 216 |
+
{
|
| 217 |
+
return *((_F*)__p);
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
#pragma nv_exec_check_disable
|
| 221 |
+
template <class _F>
|
| 222 |
+
__device__ __host__
|
| 223 |
+
static bool __is_empty_functor(const _F& /*__p*/)
|
| 224 |
+
{
|
| 225 |
+
return false;
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
#pragma nv_exec_check_disable
|
| 229 |
+
template <class _F>
|
| 230 |
+
__device__ __host__
|
| 231 |
+
static bool __is_empty_functor(const _F* __p)
|
| 232 |
+
{
|
| 233 |
+
return !__p;
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
#pragma nv_exec_check_disable
|
| 237 |
+
template <class _Res, class _C>
|
| 238 |
+
__device__ __host__
|
| 239 |
+
static bool __is_empty_functor(const _Res _C::* __p)
|
| 240 |
+
{
|
| 241 |
+
return !__p;
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
#pragma nv_exec_check_disable
|
| 245 |
+
template <class _Res, class... _Args>
|
| 246 |
+
__device__ __host__
|
| 247 |
+
static bool __is_empty_functor(const function<_Res(_Args...)>& __p)
|
| 248 |
+
{
|
| 249 |
+
return !__p;
|
| 250 |
+
}
|
| 251 |
+
|
| 252 |
+
template <class _F>
|
| 253 |
+
struct __make_cloner
|
| 254 |
+
{
|
| 255 |
+
#pragma nv_exec_check_disable
|
| 256 |
+
__device__ __host__
|
| 257 |
+
static void __clone_data(function &__dest, const function &__src)
|
| 258 |
+
{
|
| 259 |
+
if (__dest.__use_small_functor_data<_F>()) {
|
| 260 |
+
__dest.__obj = __dest.__get_small_functor_data();
|
| 261 |
+
new (__dest.__obj) _F(__src.__get_functor<_F>(__src.__obj));
|
| 262 |
+
}
|
| 263 |
+
else {
|
| 264 |
+
__dest.__obj = new _F(__src.__get_functor<_F>(__src.__obj));
|
| 265 |
+
}
|
| 266 |
+
}
|
| 267 |
+
};
|
| 268 |
+
|
| 269 |
+
template <class _F>
|
| 270 |
+
struct __make_destructor
|
| 271 |
+
{
|
| 272 |
+
#pragma nv_exec_check_disable
|
| 273 |
+
__device__ __host__
|
| 274 |
+
static void __destruct(function *__fn)
|
| 275 |
+
{
|
| 276 |
+
if (__fn->__use_small_functor_data<_F>()) {
|
| 277 |
+
(__fn->__get_functor<_F>(__fn->__obj)).~_F();
|
| 278 |
+
}
|
| 279 |
+
else {
|
| 280 |
+
delete (_F*)(__fn->__obj);
|
| 281 |
+
}
|
| 282 |
+
}
|
| 283 |
+
};
|
| 284 |
+
|
| 285 |
+
// We cannot simple define __make_functor in the following way:
|
| 286 |
+
// template <class _T, _F>
|
| 287 |
+
// __make_functor;
|
| 288 |
+
// template <class _RetType1, class _F, class... _ArgTypes1>
|
| 289 |
+
// struct __make_functor<_RetType1(_ArgTypes1...), _F>
|
| 290 |
+
//
|
| 291 |
+
// because VS 2013 cannot unpack _RetType1(_ArgTypes1...)
|
| 292 |
+
template <class _RetType1, class _F, class... _ArgTypes1>
|
| 293 |
+
struct __make_functor
|
| 294 |
+
{
|
| 295 |
+
typedef _RetType1 type;
|
| 296 |
+
|
| 297 |
+
#pragma nv_exec_check_disable
|
| 298 |
+
__device__ __host__
|
| 299 |
+
static _RetType1 __invoke(void *__d, _ArgTypes1... __args)
|
| 300 |
+
{
|
| 301 |
+
return __get_functor<_F>(__d)(
|
| 302 |
+
internal::forward<_ArgTypes1>(__args)...);
|
| 303 |
+
}
|
| 304 |
+
};
|
| 305 |
+
|
| 306 |
+
template <class _RetType1, class _C, class _M, class... _ArgTypes1>
|
| 307 |
+
struct __make_functor<_RetType1, _M _C::*,_ArgTypes1...>
|
| 308 |
+
{
|
| 309 |
+
typedef _RetType1 type;
|
| 310 |
+
typedef _RetType1(*_Fn)(_ArgTypes1...);
|
| 311 |
+
|
| 312 |
+
#pragma nv_exec_check_disable
|
| 313 |
+
__device__ __host__
|
| 314 |
+
static _RetType1 __invoke(void *__d, _ArgTypes1... __args)
|
| 315 |
+
{
|
| 316 |
+
return __get_functor<_Fn>(__d)(
|
| 317 |
+
internal::forward<_ArgTypes1>(__args)...);
|
| 318 |
+
}
|
| 319 |
+
};
|
| 320 |
+
|
| 321 |
+
// workaround for GCC version below 4.8
|
| 322 |
+
#if (__GNUC__ == 4) && (__GNUC_MINOR__ < 8)
|
| 323 |
+
template <class _F>
|
| 324 |
+
struct __check_callability
|
| 325 |
+
: public std::integral_constant<bool,
|
| 326 |
+
!std::is_same<_F, std::nullptr_t>::value>
|
| 327 |
+
{ };
|
| 328 |
+
#elif defined(_MSC_VER)
|
| 329 |
+
// simulate VC 2013's behavior...
|
| 330 |
+
template <class _F>
|
| 331 |
+
struct __check_callability1
|
| 332 |
+
: public
|
| 333 |
+
std::integral_constant<bool,
|
| 334 |
+
// std::result_of does not handle member pointers well
|
| 335 |
+
std::is_member_pointer<_F>::value ||
|
| 336 |
+
std::is_convertible<
|
| 337 |
+
_RetType,
|
| 338 |
+
typename std::result_of<_F(_ArgTypes...)>::type
|
| 339 |
+
>::value
|
| 340 |
+
>
|
| 341 |
+
{ };
|
| 342 |
+
|
| 343 |
+
template <class _F>
|
| 344 |
+
struct __check_callability
|
| 345 |
+
: public std::integral_constant<
|
| 346 |
+
bool,
|
| 347 |
+
!std::is_same<_F, function>::value &&
|
| 348 |
+
__check_callability1<typename std::remove_cv<_F>::type>::value>
|
| 349 |
+
{ };
|
| 350 |
+
#else /* !((__GNUC__ == 4) && (__GNUC_MINOR__ < 8)) _MSC_VER */
|
| 351 |
+
template <class _F,
|
| 352 |
+
class _T = typename std::result_of<_F(_ArgTypes...)>::type>
|
| 353 |
+
struct __check_callability
|
| 354 |
+
: public std::integral_constant<
|
| 355 |
+
bool,
|
| 356 |
+
!std::is_same<_F, function>::value &&
|
| 357 |
+
std::is_convertible< _T, _RetType>::value>
|
| 358 |
+
{ };
|
| 359 |
+
#endif /* __GNUC__ == 4) && (__GNUC_MINOR__ < 8) */
|
| 360 |
+
|
| 361 |
+
#pragma nv_exec_check_disable
|
| 362 |
+
__device__ __host__
|
| 363 |
+
void __destroy()
|
| 364 |
+
{
|
| 365 |
+
if (__obj) {
|
| 366 |
+
__destructor(this);
|
| 367 |
+
__obj = 0;
|
| 368 |
+
}
|
| 369 |
+
}
|
| 370 |
+
|
| 371 |
+
#pragma nv_exec_check_disable
|
| 372 |
+
__device__ __host__
|
| 373 |
+
void __clear()
|
| 374 |
+
{
|
| 375 |
+
__obj = 0;
|
| 376 |
+
__meta_fn = 0;
|
| 377 |
+
__cloner = 0;
|
| 378 |
+
__destructor = 0;
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
public:
|
| 382 |
+
typedef _RetType result_type;
|
| 383 |
+
|
| 384 |
+
/*
|
| 385 |
+
* These typedef(s) are derived from __maybe_base_function
|
| 386 |
+
* typedef T1 argument_type; // only if sizeof...(ArgTypes) == 1 and
|
| 387 |
+
* // the type in ArgTypes is T1
|
| 388 |
+
* typedef T1 first_argument_type; // only if sizeof...(ArgTypes) == 2 and
|
| 389 |
+
* // ArgTypes contains T1 and T2
|
| 390 |
+
* typedef T2 second_argument_type; // only if sizeof...(ArgTypes) == 2 and
|
| 391 |
+
* // ArgTypes contains T1 and T2
|
| 392 |
+
*/
|
| 393 |
+
|
| 394 |
+
// 20.8.11.2.1 construct/copy/destroy [func.wrap.con]
|
| 395 |
+
|
| 396 |
+
#pragma nv_exec_check_disable
|
| 397 |
+
__device__ __host__
|
| 398 |
+
function() __NV_NOEXCEPT
|
| 399 |
+
: __obj(0), __meta_fn(0), __cloner(0), __destructor(0) {}
|
| 400 |
+
|
| 401 |
+
#pragma nv_exec_check_disable
|
| 402 |
+
__device__ __host__
|
| 403 |
+
function(std::nullptr_t) __NV_NOEXCEPT
|
| 404 |
+
: __obj(0), __meta_fn(0), __cloner(0), __destructor(0) {}
|
| 405 |
+
|
| 406 |
+
#pragma nv_exec_check_disable
|
| 407 |
+
__device__ __host__
|
| 408 |
+
function(const function &__fn)
|
| 409 |
+
{
|
| 410 |
+
if (__fn.__obj == 0) {
|
| 411 |
+
__clear();
|
| 412 |
+
}
|
| 413 |
+
else {
|
| 414 |
+
__meta_fn = __fn.__meta_fn;
|
| 415 |
+
__destructor = __fn.__destructor;
|
| 416 |
+
__fn.__cloner(*this, __fn);
|
| 417 |
+
__cloner = __fn.__cloner;
|
| 418 |
+
}
|
| 419 |
+
}
|
| 420 |
+
|
| 421 |
+
#pragma nv_exec_check_disable
|
| 422 |
+
__device__ __host__
|
| 423 |
+
function(function &&__fn)
|
| 424 |
+
{
|
| 425 |
+
__fn.swap(*this);
|
| 426 |
+
}
|
| 427 |
+
|
| 428 |
+
// VS 2013 cannot process __check_callability type trait.
|
| 429 |
+
// So, we check callability using static_assert instead of
|
| 430 |
+
// using SFINAE such as
|
| 431 |
+
// template<class _F,
|
| 432 |
+
// class = typename std::enable_if<
|
| 433 |
+
// __check_callability<_F>::value
|
| 434 |
+
// >::type>
|
| 435 |
+
|
| 436 |
+
#pragma nv_exec_check_disable
|
| 437 |
+
template<class _F>
|
| 438 |
+
__device__ __host__
|
| 439 |
+
function(_F);
|
| 440 |
+
|
| 441 |
+
// copy and swap
|
| 442 |
+
#pragma nv_exec_check_disable
|
| 443 |
+
__device__ __host__
|
| 444 |
+
function& operator=(const function& __fn)
|
| 445 |
+
{
|
| 446 |
+
function(__fn).swap(*this);
|
| 447 |
+
return *this;
|
| 448 |
+
}
|
| 449 |
+
|
| 450 |
+
#pragma nv_exec_check_disable
|
| 451 |
+
__device__ __host__
|
| 452 |
+
function& operator=(function&& __fn)
|
| 453 |
+
{
|
| 454 |
+
function(internal::move(__fn)).swap(*this);
|
| 455 |
+
return *this;
|
| 456 |
+
}
|
| 457 |
+
|
| 458 |
+
#pragma nv_exec_check_disable
|
| 459 |
+
__device__ __host__
|
| 460 |
+
function& operator=(std::nullptr_t)
|
| 461 |
+
{
|
| 462 |
+
__destroy();
|
| 463 |
+
return *this;
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
#pragma nv_exec_check_disable
|
| 467 |
+
template<class _F>
|
| 468 |
+
__device__ __host__
|
| 469 |
+
function&
|
| 470 |
+
operator=(_F&& __fn)
|
| 471 |
+
{
|
| 472 |
+
static_assert(__check_callability<_F>::value,
|
| 473 |
+
"Unable to create functor object!");
|
| 474 |
+
function(internal::forward<_F>(__fn)).swap(*this);
|
| 475 |
+
return *this;
|
| 476 |
+
}
|
| 477 |
+
|
| 478 |
+
#pragma nv_exec_check_disable
|
| 479 |
+
__device__ __host__
|
| 480 |
+
~function()
|
| 481 |
+
{
|
| 482 |
+
__destroy();
|
| 483 |
+
}
|
| 484 |
+
|
| 485 |
+
// 20.8.11.2.2 function modifiers [func.wrap.func.mod]
|
| 486 |
+
#pragma nv_exec_check_disable
|
| 487 |
+
__device__ __host__
|
| 488 |
+
void swap(function& __fn) __NV_NOEXCEPT
|
| 489 |
+
{
|
| 490 |
+
internal::swap(__meta_fn, __fn.__meta_fn);
|
| 491 |
+
internal::swap(__cloner, __fn.__cloner);
|
| 492 |
+
internal::swap(__destructor, __fn.__destructor);
|
| 493 |
+
|
| 494 |
+
if (__is_small_functor_data() && __fn.__is_small_functor_data()) {
|
| 495 |
+
internal::swap(__small_functor_data, __fn.__small_functor_data);
|
| 496 |
+
}
|
| 497 |
+
else if (__is_small_functor_data()) {
|
| 498 |
+
internal::swap(__small_functor_data, __fn.__small_functor_data);
|
| 499 |
+
internal::swap(__obj, __fn.__obj);
|
| 500 |
+
__fn.__obj = __fn.__get_small_functor_data();
|
| 501 |
+
}
|
| 502 |
+
else if (__fn.__is_small_functor_data()) {
|
| 503 |
+
internal::swap(__small_functor_data, __fn.__small_functor_data);
|
| 504 |
+
internal::swap(__obj, __fn.__obj);
|
| 505 |
+
__obj = __get_small_functor_data();
|
| 506 |
+
}
|
| 507 |
+
else {
|
| 508 |
+
internal::swap(__obj, __fn.__obj);
|
| 509 |
+
}
|
| 510 |
+
}
|
| 511 |
+
|
| 512 |
+
// 20.8.11.2.3 function capacity [func.wrap.func.cap]
|
| 513 |
+
#pragma nv_exec_check_disable
|
| 514 |
+
__device__ __host__
|
| 515 |
+
explicit operator bool() const __NV_NOEXCEPT
|
| 516 |
+
{
|
| 517 |
+
return __obj;
|
| 518 |
+
}
|
| 519 |
+
|
| 520 |
+
// 20.8.11.2.4 function invocation [func.wrap.func.inv]
|
| 521 |
+
// function::operator() can only be called in device code
|
| 522 |
+
// to avoid cross-execution space calls
|
| 523 |
+
#pragma nv_exec_check_disable
|
| 524 |
+
__device__ __host__
|
| 525 |
+
_RetType operator()(_ArgTypes...) const;
|
| 526 |
+
|
| 527 |
+
};
|
| 528 |
+
|
| 529 |
+
// Out-of-line definitions
|
| 530 |
+
#pragma nv_exec_check_disable
|
| 531 |
+
template<class _RetType, class... _ArgTypes>
|
| 532 |
+
template<class _F>
|
| 533 |
+
__device__ __host__
|
| 534 |
+
function<_RetType(_ArgTypes...)>::function(_F __fn)
|
| 535 |
+
: __obj(0), __meta_fn(0), __cloner(0), __destructor(0)
|
| 536 |
+
{
|
| 537 |
+
static_assert(__check_callability<_F>::value,
|
| 538 |
+
"Unable to construct functor object!");
|
| 539 |
+
if (__is_empty_functor(__fn))
|
| 540 |
+
return;
|
| 541 |
+
__meta_fn = &__make_functor<_RetType, _F, _ArgTypes...>::__invoke;
|
| 542 |
+
__cloner = &__make_cloner<_F>::__clone_data;
|
| 543 |
+
__destructor = &__make_destructor<_F>::__destruct;
|
| 544 |
+
|
| 545 |
+
if (__use_small_functor_data<_F>()) {
|
| 546 |
+
__obj = __get_small_functor_data();
|
| 547 |
+
new ((void*)__obj) _F(internal::move(__fn));
|
| 548 |
+
}
|
| 549 |
+
else {
|
| 550 |
+
__obj = new _F(internal::move(__fn));
|
| 551 |
+
}
|
| 552 |
+
}
|
| 553 |
+
|
| 554 |
+
#pragma nv_exec_check_disable
|
| 555 |
+
template <class _RetType, class..._ArgTypes>
|
| 556 |
+
__device__ __host__
|
| 557 |
+
_RetType
|
| 558 |
+
function<_RetType(_ArgTypes...)>::operator()(_ArgTypes... __args) const
|
| 559 |
+
{
|
| 560 |
+
return __meta_fn(__obj, internal::forward<_ArgTypes>(__args)...);
|
| 561 |
+
}
|
| 562 |
+
|
| 563 |
+
// 20.8.11.2.6, Null pointer comparisons:
|
| 564 |
+
|
| 565 |
+
#pragma nv_exec_check_disable
|
| 566 |
+
template <class _R, class... _ArgTypes>
|
| 567 |
+
__device__ __host__
|
| 568 |
+
bool operator==(const function<_R(_ArgTypes...)>& __fn, std::nullptr_t)
|
| 569 |
+
__NV_NOEXCEPT
|
| 570 |
+
{
|
| 571 |
+
return !__fn;
|
| 572 |
+
}
|
| 573 |
+
|
| 574 |
+
#pragma nv_exec_check_disable
|
| 575 |
+
template <class _R, class... _ArgTypes>
|
| 576 |
+
__device__ __host__
|
| 577 |
+
bool operator==(std::nullptr_t, const function<_R(_ArgTypes...)>& __fn)
|
| 578 |
+
__NV_NOEXCEPT
|
| 579 |
+
{
|
| 580 |
+
return !__fn;
|
| 581 |
+
}
|
| 582 |
+
|
| 583 |
+
#pragma nv_exec_check_disable
|
| 584 |
+
template <class _R, class... _ArgTypes>
|
| 585 |
+
__device__ __host__
|
| 586 |
+
bool operator!=(const function<_R(_ArgTypes...)>& __fn, std::nullptr_t)
|
| 587 |
+
__NV_NOEXCEPT
|
| 588 |
+
{
|
| 589 |
+
return static_cast<bool>(__fn);
|
| 590 |
+
}
|
| 591 |
+
|
| 592 |
+
#pragma nv_exec_check_disable
|
| 593 |
+
template <class _R, class... _ArgTypes>
|
| 594 |
+
__device__ __host__
|
| 595 |
+
bool operator!=(std::nullptr_t, const function<_R(_ArgTypes...)>& __fn)
|
| 596 |
+
__NV_NOEXCEPT
|
| 597 |
+
{
|
| 598 |
+
return static_cast<bool>(__fn);
|
| 599 |
+
}
|
| 600 |
+
|
| 601 |
+
// 20.8.11.2.7, specialized algorithms:
|
| 602 |
+
#pragma nv_exec_check_disable
|
| 603 |
+
template <class _R, class... _ArgTypes>
|
| 604 |
+
__device__ __host__
|
| 605 |
+
void swap(function<_R(_ArgTypes...)>& __fn1, function<_R(_ArgTypes...)>& __fn2)
|
| 606 |
+
{
|
| 607 |
+
__fn1.swap(__fn2);
|
| 608 |
+
}
|
| 609 |
+
|
| 610 |
+
} // namespace nvstd
|
| 611 |
+
|
| 612 |
+
#undef __NV_NOEXCEPT
|
| 613 |
+
#undef __NV_CONSTEXPR
|
| 614 |
+
#undef __NV_ALIGNOF
|
| 615 |
+
|
| 616 |
+
#endif // __NV_LIBCXX_FUNCTIONAL_H__
|
| 617 |
+
|
| 618 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__)
|
| 619 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 620 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__
|
| 621 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/crt/sm_90_rt.hpp
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2022 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 51 |
+
#if defined(_MSC_VER)
|
| 52 |
+
#pragma message("crt/sm_90_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| 53 |
+
#else
|
| 54 |
+
#warning "crt/sm_90_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| 55 |
+
#endif
|
| 56 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 57 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
#if !defined(__SM_90_RT_HPP__)
|
| 61 |
+
#define __SM_90_RT_HPP__
|
| 62 |
+
|
| 63 |
+
#if defined(__CUDACC_RTC__)
|
| 64 |
+
#define __SM_90_RT_DECL__ __host__ __device__
|
| 65 |
+
#else /* !__CUDACC_RTC__ */
|
| 66 |
+
#define __SM_90_RT_DECL__ static __device__ __inline__
|
| 67 |
+
#endif /* __CUDACC_RTC__ */
|
| 68 |
+
|
| 69 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 70 |
+
|
| 71 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
|
| 72 |
+
|
| 73 |
+
/*******************************************************************************
|
| 74 |
+
* *
|
| 75 |
+
* *
|
| 76 |
+
* *
|
| 77 |
+
*******************************************************************************/
|
| 78 |
+
|
| 79 |
+
#include "builtin_types.h"
|
| 80 |
+
#include "device_types.h"
|
| 81 |
+
#include "host_defines.h"
|
| 82 |
+
|
| 83 |
+
/*******************************************************************************
|
| 84 |
+
* *
|
| 85 |
+
* Below are implementations of SM-9.0 builtin functions which are included as *
|
| 86 |
+
* source (instead of being built in to the compiler) *
|
| 87 |
+
* *
|
| 88 |
+
*******************************************************************************/
|
| 89 |
+
extern "C" {
|
| 90 |
+
__device__ unsigned __nv_isClusterShared_impl(const void *);
|
| 91 |
+
__device__ void * __nv_cluster_map_shared_rank_impl(const void *, unsigned);
|
| 92 |
+
__device__ unsigned __nv_cluster_query_shared_rank_impl(const void *);
|
| 93 |
+
__device__ unsigned __nv_clusterDimIsSpecifed_impl();
|
| 94 |
+
__device__ void __nv_clusterDim_impl(unsigned *, unsigned *, unsigned *);
|
| 95 |
+
__device__ void __nv_clusterRelativeBlockIdx_impl(unsigned *,
|
| 96 |
+
unsigned *, unsigned *);
|
| 97 |
+
__device__ void __nv_clusterGridDimInClusters_impl(unsigned *,
|
| 98 |
+
unsigned *, unsigned *);
|
| 99 |
+
__device__ void __nv_clusterIdx_impl(unsigned *, unsigned *, unsigned *);
|
| 100 |
+
__device__ unsigned __nv_clusterRelativeBlockRank_impl();
|
| 101 |
+
__device__ unsigned __nv_clusterSizeInBlocks_impl();
|
| 102 |
+
__device__ void __nv_cluster_barrier_arrive_impl();
|
| 103 |
+
__device__ void __nv_cluster_barrier_arrive_relaxed_impl();
|
| 104 |
+
__device__ void __nv_cluster_barrier_wait_impl();
|
| 105 |
+
__device__ void __nv_threadfence_cluster_impl();
|
| 106 |
+
|
| 107 |
+
__device__ __device_builtin__ float2 __f2AtomicAdd(float2 *, float2);
|
| 108 |
+
__device__ __device_builtin__ float2 __f2AtomicAdd_block(float2 *, float2);
|
| 109 |
+
__device__ __device_builtin__ float2 __f2AtomicAdd_system(float2 *, float2);
|
| 110 |
+
__device__ __device_builtin__ float4 __f4AtomicAdd(float4 *, float4);
|
| 111 |
+
__device__ __device_builtin__ float4 __f4AtomicAdd_block(float4 *, float4);
|
| 112 |
+
__device__ __device_builtin__ float4 __f4AtomicAdd_system(float4 *, float4);
|
| 113 |
+
} // extern "C"
|
| 114 |
+
|
| 115 |
+
__SM_90_RT_DECL__ unsigned __isCtaShared(const void *ptr)
|
| 116 |
+
{
|
| 117 |
+
return __isShared(ptr);
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
__SM_90_RT_DECL__ unsigned __isClusterShared(const void *ptr)
|
| 121 |
+
{
|
| 122 |
+
return __nv_isClusterShared_impl(ptr);
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
__SM_90_RT_DECL__ void *__cluster_map_shared_rank(const void *ptr,
|
| 126 |
+
unsigned target_block_rank)
|
| 127 |
+
{
|
| 128 |
+
return __nv_cluster_map_shared_rank_impl(ptr, target_block_rank);
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
__SM_90_RT_DECL__ unsigned __cluster_query_shared_rank(const void *ptr)
|
| 132 |
+
{
|
| 133 |
+
return __nv_cluster_query_shared_rank_impl(ptr);
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
__SM_90_RT_DECL__ uint2 __cluster_map_shared_multicast(const void *ptr,
|
| 137 |
+
unsigned int cluster_cta_mask)
|
| 138 |
+
{
|
| 139 |
+
return make_uint2((unsigned)__cvta_generic_to_shared(ptr), cluster_cta_mask);
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
__SM_90_RT_DECL__ unsigned __clusterDimIsSpecified()
|
| 143 |
+
{
|
| 144 |
+
return __nv_clusterDimIsSpecifed_impl();
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
__SM_90_RT_DECL__ dim3 __clusterDim()
|
| 148 |
+
{
|
| 149 |
+
unsigned x, y, z;
|
| 150 |
+
__nv_clusterDim_impl(&x, &y, &z);
|
| 151 |
+
return dim3(x,y,z);
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
__SM_90_RT_DECL__ dim3 __clusterRelativeBlockIdx()
|
| 155 |
+
{
|
| 156 |
+
unsigned x, y, z;
|
| 157 |
+
__nv_clusterRelativeBlockIdx_impl(&x, &y, &z);
|
| 158 |
+
return dim3(x,y,z);
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
__SM_90_RT_DECL__ dim3 __clusterGridDimInClusters()
|
| 162 |
+
{
|
| 163 |
+
unsigned x, y, z;
|
| 164 |
+
__nv_clusterGridDimInClusters_impl(&x, &y, &z);
|
| 165 |
+
return dim3(x,y,z);
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
__SM_90_RT_DECL__ dim3 __clusterIdx()
|
| 169 |
+
{
|
| 170 |
+
unsigned x, y, z;
|
| 171 |
+
__nv_clusterIdx_impl(&x, &y, &z);
|
| 172 |
+
return dim3(x,y,z);
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
__SM_90_RT_DECL__ unsigned __clusterRelativeBlockRank()
|
| 176 |
+
{
|
| 177 |
+
return __nv_clusterRelativeBlockRank_impl();
|
| 178 |
+
}
|
| 179 |
+
|
| 180 |
+
__SM_90_RT_DECL__ unsigned __clusterSizeInBlocks()
|
| 181 |
+
{
|
| 182 |
+
return __nv_clusterSizeInBlocks_impl();
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
__SM_90_RT_DECL__ void __cluster_barrier_arrive()
|
| 186 |
+
{
|
| 187 |
+
__nv_cluster_barrier_arrive_impl();
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
__SM_90_RT_DECL__ void __cluster_barrier_arrive_relaxed()
|
| 191 |
+
{
|
| 192 |
+
__nv_cluster_barrier_arrive_relaxed_impl();
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
__SM_90_RT_DECL__ void __cluster_barrier_wait()
|
| 196 |
+
{
|
| 197 |
+
__nv_cluster_barrier_wait_impl();
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
__SM_90_RT_DECL__ void __threadfence_cluster()
|
| 201 |
+
{
|
| 202 |
+
__nv_threadfence_cluster_impl();
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
/* Define __PTR for atomicAdd prototypes below, undef after done */
|
| 207 |
+
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
|
| 208 |
+
#define __PTR "l"
|
| 209 |
+
#else
|
| 210 |
+
#define __PTR "r"
|
| 211 |
+
#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/
|
| 212 |
+
|
| 213 |
+
__SM_90_RT_DECL__ float2 atomicAdd(float2 *address, float2 val) {
|
| 214 |
+
return __f2AtomicAdd(address, val);
|
| 215 |
+
}
|
| 216 |
+
|
| 217 |
+
__SM_90_RT_DECL__ float2 atomicAdd_block(float2 *address, float2 val) {
|
| 218 |
+
return __f2AtomicAdd_block(address, val);
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
__SM_90_RT_DECL__ float2 atomicAdd_system(float2 *address, float2 val) {
|
| 222 |
+
return __f2AtomicAdd_system(address, val);
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
__SM_90_RT_DECL__ float4 atomicAdd(float4 *address, float4 val) {
|
| 226 |
+
return __f4AtomicAdd(address, val);
|
| 227 |
+
}
|
| 228 |
+
|
| 229 |
+
__SM_90_RT_DECL__ float4 atomicAdd_block(float4 *address, float4 val) {
|
| 230 |
+
return __f4AtomicAdd_block(address, val);
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
__SM_90_RT_DECL__ float4 atomicAdd_system(float4 *address, float4 val) {
|
| 234 |
+
return __f4AtomicAdd_system(address, val);
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 900 */
|
| 238 |
+
|
| 239 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 240 |
+
|
| 241 |
+
#undef __SM_90_RT_DECL__
|
| 242 |
+
|
| 243 |
+
#endif /* !__SM_90_RT_HPP__ */
|
| 244 |
+
|
| 245 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__)
|
| 246 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 247 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__
|
| 248 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExt.h
ADDED
|
@@ -0,0 +1,1499 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO USER:
|
| 5 |
+
*
|
| 6 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 11 |
+
* of a form of NVIDIA software license agreement.
|
| 12 |
+
*
|
| 13 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 14 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 15 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 16 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 17 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 18 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 19 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 20 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 21 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 22 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 23 |
+
*
|
| 24 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 25 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 26 |
+
* "commercial computer software" and "commercial computer software
|
| 27 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 28 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 29 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 30 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 31 |
+
* source code with only those rights set forth herein.
|
| 32 |
+
*
|
| 33 |
+
* Any use of this source code in individual and commercial software must
|
| 34 |
+
* include, in the user documentation and internal comments to the code,
|
| 35 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 36 |
+
*/
|
| 37 |
+
|
| 38 |
+
/** \file nvToolsExt.h
|
| 39 |
+
*/
|
| 40 |
+
|
| 41 |
+
/* ========================================================================= */
|
| 42 |
+
/** \mainpage
|
| 43 |
+
* \tableofcontents
|
| 44 |
+
* \section INTRODUCTION Introduction
|
| 45 |
+
*
|
| 46 |
+
* The NVIDIA Tools Extension library is a set of functions that a
|
| 47 |
+
* developer can use to provide additional information to tools.
|
| 48 |
+
* The additional information is used by the tool to improve
|
| 49 |
+
* analysis and visualization of data.
|
| 50 |
+
*
|
| 51 |
+
* The library introduces close to zero overhead if no tool is
|
| 52 |
+
* attached to the application. The overhead when a tool is
|
| 53 |
+
* attached is specific to the tool.
|
| 54 |
+
*
|
| 55 |
+
* \section INITIALIZATION_SECTION Initialization
|
| 56 |
+
*
|
| 57 |
+
* Typically the tool's library that plugs into NVTX is indirectly
|
| 58 |
+
* loaded via enviromental properties that are platform specific.
|
| 59 |
+
* For some platform or special cases, the user may be required
|
| 60 |
+
* to instead explicity initialize instead though. This can also
|
| 61 |
+
* be helpful to control when the API loads a tool's library instead
|
| 62 |
+
* of what would typically be the first function call to emit info.
|
| 63 |
+
* For these rare case, see \ref INITIALIZATION for additional information.
|
| 64 |
+
*
|
| 65 |
+
* \section MARKERS_AND_RANGES Markers and Ranges
|
| 66 |
+
*
|
| 67 |
+
* Markers and ranges are used to describe events at a specific time (markers)
|
| 68 |
+
* or over a time span (ranges) during the execution of the application
|
| 69 |
+
* respectively.
|
| 70 |
+
*
|
| 71 |
+
* \subsection MARKERS Markers
|
| 72 |
+
*
|
| 73 |
+
* Markers denote specific moments in time.
|
| 74 |
+
*
|
| 75 |
+
*
|
| 76 |
+
* See \ref DOMAINS and \ref EVENT_ATTRIBUTES for additional information on
|
| 77 |
+
* how to specify the domain.
|
| 78 |
+
*
|
| 79 |
+
* \subsection THREAD_RANGES Thread Ranges
|
| 80 |
+
*
|
| 81 |
+
* Thread ranges denote nested time ranges. Nesting is maintained per thread
|
| 82 |
+
* per domain and does not require any additional correlation mechanism. The
|
| 83 |
+
* duration of a thread range is defined by the corresponding pair of
|
| 84 |
+
* nvtxRangePush* to nvtxRangePop API calls.
|
| 85 |
+
*
|
| 86 |
+
* See \ref DOMAINS and \ref EVENT_ATTRIBUTES for additional information on
|
| 87 |
+
* how to specify the domain.
|
| 88 |
+
*
|
| 89 |
+
* \subsection PROCESS_RANGES Process Ranges
|
| 90 |
+
*
|
| 91 |
+
* Process ranges denote a time span that can expose arbitrary concurrency, as
|
| 92 |
+
* opposed to thread ranges that only support nesting. In addition the range
|
| 93 |
+
* start event can happen on a different thread than the end marker. For the
|
| 94 |
+
* correlation of a start/end pair an unique correlation ID is used that is
|
| 95 |
+
* returned from the start API call and needs to be passed into the end API
|
| 96 |
+
* call.
|
| 97 |
+
*
|
| 98 |
+
* \subsection EVENT_ATTRIBUTES Event Attributes
|
| 99 |
+
*
|
| 100 |
+
* \ref MARKERS_AND_RANGES can be annotated with various attributes to provide
|
| 101 |
+
* additional information for an event or to guide the tool's visualization of
|
| 102 |
+
* the data. Each of the attributes is optional and if left unused the
|
| 103 |
+
* attributes fall back to a default value. The attributes include:
|
| 104 |
+
* - color
|
| 105 |
+
* - category
|
| 106 |
+
*
|
| 107 |
+
* To specify any attribute other than the text message, the \ref
|
| 108 |
+
* EVENT_ATTRIBUTE_STRUCTURE "Event Attribute Structure" must be used.
|
| 109 |
+
*
|
| 110 |
+
* \section DOMAINS Domains
|
| 111 |
+
*
|
| 112 |
+
* Domains enable developers to scope annotations. By default all events and
|
| 113 |
+
* annotations are in the default domain. Additional domains can be registered.
|
| 114 |
+
* This allows developers to scope markers, ranges, and resources names to
|
| 115 |
+
* avoid conflicts.
|
| 116 |
+
*
|
| 117 |
+
* The function ::nvtxDomainCreateA or ::nvtxDomainCreateW is used to create
|
| 118 |
+
* a named domain.
|
| 119 |
+
*
|
| 120 |
+
* Each domain maintains its own
|
| 121 |
+
* - categories
|
| 122 |
+
* - thread range stacks
|
| 123 |
+
* - registered strings
|
| 124 |
+
*
|
| 125 |
+
* The function ::nvtxDomainDestroy marks the end of the domain. Destroying
|
| 126 |
+
* a domain unregisters and destroys all objects associated with it such as
|
| 127 |
+
* registered strings, resource objects, named categories, and started ranges.
|
| 128 |
+
*
|
| 129 |
+
* \section RESOURCE_NAMING Resource Naming
|
| 130 |
+
*
|
| 131 |
+
* This section covers calls that allow to annotate objects with user-provided
|
| 132 |
+
* names in order to allow for a better analysis of complex trace data. All of
|
| 133 |
+
* the functions take the handle or the ID of the object to name and the name.
|
| 134 |
+
* The functions can be called multiple times during the execution of an
|
| 135 |
+
* application, however, in that case it is implementation dependent which
|
| 136 |
+
* name will be reported by the tool.
|
| 137 |
+
*
|
| 138 |
+
* \subsection CATEGORY_NAMING Category Naming
|
| 139 |
+
*
|
| 140 |
+
* Some function in this library support associating an integer category
|
| 141 |
+
* to enable filtering and sorting. The category naming functions allow
|
| 142 |
+
* the application to associate a user friendly name with the integer
|
| 143 |
+
* category. Support for domains have been added in NVTX_VERSION_2 to
|
| 144 |
+
* avoid collisions when domains are developed independantly.
|
| 145 |
+
*
|
| 146 |
+
* \subsection RESOURCE_OBJECTS Resource Objects
|
| 147 |
+
*
|
| 148 |
+
* Resource objects are a generic mechanism for attaching data to an application
|
| 149 |
+
* resource. The identifier field makes the association to a pointer or handle,
|
| 150 |
+
* while the type field helps provide deeper understanding of the identifier as
|
| 151 |
+
* well as enabling differentiation in cases where handles generated by different
|
| 152 |
+
* APIs may collide. The resource object may also have an associated message to
|
| 153 |
+
* associate with the application resource, enabling further annotation of this
|
| 154 |
+
* object and how it is used.
|
| 155 |
+
*
|
| 156 |
+
* The resource object was introduced in NVTX_VERSION_2 to supersede existing naming
|
| 157 |
+
* functions and allow the application resource identified by those functions to be
|
| 158 |
+
* associated to a domain. The other naming functions are still supported for backward
|
| 159 |
+
* compatibility but will be associated only to the default domain.
|
| 160 |
+
*
|
| 161 |
+
* \subsection RESOURCE_NAMING_OS Resource Naming
|
| 162 |
+
*
|
| 163 |
+
* Some operating system resources creation APIs do not support providing a user friendly
|
| 164 |
+
* name, such as some OS thread creation APIs. This API support resource naming though
|
| 165 |
+
* both through resource objects and functions following the pattern
|
| 166 |
+
* nvtxName[RESOURCE_TYPE][A|W](identifier, name). Resource objects introduced in NVTX_VERSION 2
|
| 167 |
+
* supersede the other functions with a a more general method of assigning names to OS resources,
|
| 168 |
+
* along with associating them to domains too. The older nvtxName* functions are only associated
|
| 169 |
+
* with the default domain.
|
| 170 |
+
* \section EXTENSIONS Optional Extensions
|
| 171 |
+
* Optional extensions will either appear within the existing sections the extend or appear
|
| 172 |
+
* in the "Related Pages" when they introduce new concepts.
|
| 173 |
+
*/
|
| 174 |
+
|
| 175 |
+
/**
|
| 176 |
+
* Tools Extension API version
|
| 177 |
+
*/
|
| 178 |
+
#if defined(NVTX_VERSION) && NVTX_VERSION < 3
|
| 179 |
+
#error "Trying to #include NVTX version 3 in a source file where an older NVTX version has already been included. If you are not directly using NVTX (the NVIDIA Tools Extension library), you are getting this error because libraries you are using have included different versions of NVTX. Suggested solutions are: (1) reorder #includes so the newest NVTX version is included first, (2) avoid using the conflicting libraries in the same .c/.cpp file, or (3) update the library using the older NVTX version to use the newer version instead."
|
| 180 |
+
#endif
|
| 181 |
+
|
| 182 |
+
/* Header guard */
|
| 183 |
+
#if !defined(NVTX_VERSION)
|
| 184 |
+
#define NVTX_VERSION 3
|
| 185 |
+
|
| 186 |
+
#if defined(_MSC_VER)
|
| 187 |
+
#define NVTX_API __stdcall
|
| 188 |
+
#define NVTX_INLINE_STATIC __inline static
|
| 189 |
+
#else /*defined(__GNUC__)*/
|
| 190 |
+
#define NVTX_API
|
| 191 |
+
#define NVTX_INLINE_STATIC inline static
|
| 192 |
+
#endif /* Platform */
|
| 193 |
+
|
| 194 |
+
#if defined(NVTX_NO_IMPL)
|
| 195 |
+
/* When omitting implementation, avoid declaring functions inline */
|
| 196 |
+
/* without definitions, since this causes compiler warnings. */
|
| 197 |
+
#define NVTX_DECLSPEC
|
| 198 |
+
#elif defined(NVTX_EXPORT_API)
|
| 199 |
+
/* Allow overriding definition of NVTX_DECLSPEC when exporting API. */
|
| 200 |
+
/* Default is empty, meaning non-inline with external linkage. */
|
| 201 |
+
#if !defined(NVTX_DECLSPEC)
|
| 202 |
+
#define NVTX_DECLSPEC
|
| 203 |
+
#endif
|
| 204 |
+
#else
|
| 205 |
+
/* Normal NVTX usage defines the NVTX API inline with static */
|
| 206 |
+
/* (internal) linkage. */
|
| 207 |
+
#define NVTX_DECLSPEC NVTX_INLINE_STATIC
|
| 208 |
+
#endif
|
| 209 |
+
|
| 210 |
+
#include "nvtxDetail/nvtxLinkOnce.h"
|
| 211 |
+
|
| 212 |
+
#define NVTX_VERSIONED_IDENTIFIER_L3(NAME, VERSION) NAME##_v##VERSION
|
| 213 |
+
#define NVTX_VERSIONED_IDENTIFIER_L2(NAME, VERSION) NVTX_VERSIONED_IDENTIFIER_L3(NAME, VERSION)
|
| 214 |
+
#define NVTX_VERSIONED_IDENTIFIER(NAME) NVTX_VERSIONED_IDENTIFIER_L2(NAME, NVTX_VERSION)
|
| 215 |
+
|
| 216 |
+
/**
|
| 217 |
+
* The nvToolsExt library depends on stdint.h. If the build tool chain in use
|
| 218 |
+
* does not include stdint.h then define NVTX_STDINT_TYPES_ALREADY_DEFINED
|
| 219 |
+
* and define the following types:
|
| 220 |
+
* <ul>
|
| 221 |
+
* <li>uint8_t
|
| 222 |
+
* <li>int8_t
|
| 223 |
+
* <li>uint16_t
|
| 224 |
+
* <li>int16_t
|
| 225 |
+
* <li>uint32_t
|
| 226 |
+
* <li>int32_t
|
| 227 |
+
* <li>uint64_t
|
| 228 |
+
* <li>int64_t
|
| 229 |
+
* <li>uintptr_t
|
| 230 |
+
* <li>intptr_t
|
| 231 |
+
* </ul>
|
| 232 |
+
* #define NVTX_STDINT_TYPES_ALREADY_DEFINED if you are using your own header file.
|
| 233 |
+
*/
|
| 234 |
+
#ifndef NVTX_STDINT_TYPES_ALREADY_DEFINED
|
| 235 |
+
#include <stdint.h>
|
| 236 |
+
#endif
|
| 237 |
+
|
| 238 |
+
#include <stddef.h>
|
| 239 |
+
|
| 240 |
+
#ifdef __cplusplus
|
| 241 |
+
extern "C" {
|
| 242 |
+
#endif /* __cplusplus */
|
| 243 |
+
|
| 244 |
+
/**
|
| 245 |
+
* Result Codes
|
| 246 |
+
*/
|
| 247 |
+
|
| 248 |
+
#define NVTX_SUCCESS 0
|
| 249 |
+
#define NVTX_FAIL 1
|
| 250 |
+
#define NVTX_ERR_INIT_LOAD_PROPERTY 2
|
| 251 |
+
#define NVTX_ERR_INIT_ACCESS_LIBRARY 3
|
| 252 |
+
#define NVTX_ERR_INIT_LOAD_LIBRARY 4
|
| 253 |
+
#define NVTX_ERR_INIT_MISSING_LIBRARY_ENTRY_POINT 5
|
| 254 |
+
#define NVTX_ERR_INIT_FAILED_LIBRARY_ENTRY_POINT 6
|
| 255 |
+
#define NVTX_ERR_NO_INJECTION_LIBRARY_AVAILABLE 7
|
| 256 |
+
|
| 257 |
+
/**
|
| 258 |
+
* Size of the nvtxEventAttributes_t structure.
|
| 259 |
+
*/
|
| 260 |
+
#define NVTX_EVENT_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxEventAttributes_t) ) )
|
| 261 |
+
|
| 262 |
+
#define NVTX_NO_PUSH_POP_TRACKING ((int)-2)
|
| 263 |
+
|
| 264 |
+
typedef uint64_t nvtxRangeId_t;
|
| 265 |
+
|
| 266 |
+
/* Forward declaration of opaque domain registration structure */
|
| 267 |
+
struct nvtxDomainRegistration_st;
|
| 268 |
+
typedef struct nvtxDomainRegistration_st nvtxDomainRegistration;
|
| 269 |
+
|
| 270 |
+
/* \brief Domain Handle Structure.
|
| 271 |
+
* \anchor DOMAIN_HANDLE_STRUCTURE
|
| 272 |
+
*
|
| 273 |
+
* This structure is opaque to the user and is used as a handle to reference
|
| 274 |
+
* a domain. This type is returned from tools when using the NVTX API to
|
| 275 |
+
* create a domain.
|
| 276 |
+
*
|
| 277 |
+
*/
|
| 278 |
+
typedef nvtxDomainRegistration* nvtxDomainHandle_t;
|
| 279 |
+
|
| 280 |
+
/* Forward declaration of opaque string registration structure */
|
| 281 |
+
struct nvtxStringRegistration_st;
|
| 282 |
+
typedef struct nvtxStringRegistration_st nvtxStringRegistration;
|
| 283 |
+
|
| 284 |
+
/* \brief Registered String Handle Structure.
|
| 285 |
+
* \anchor REGISTERED_STRING_HANDLE_STRUCTURE
|
| 286 |
+
*
|
| 287 |
+
* This structure is opaque to the user and is used as a handle to reference
|
| 288 |
+
* a registered string. This type is returned from tools when using the NVTX
|
| 289 |
+
* API to create a registered string.
|
| 290 |
+
*
|
| 291 |
+
*/
|
| 292 |
+
typedef nvtxStringRegistration* nvtxStringHandle_t;
|
| 293 |
+
|
| 294 |
+
/* ========================================================================= */
|
| 295 |
+
/** \defgroup GENERAL General
|
| 296 |
+
* @{
|
| 297 |
+
*/
|
| 298 |
+
|
| 299 |
+
/** ---------------------------------------------------------------------------
|
| 300 |
+
* Color Types
|
| 301 |
+
* ------------------------------------------------------------------------- */
|
| 302 |
+
typedef enum nvtxColorType_t
|
| 303 |
+
{
|
| 304 |
+
NVTX_COLOR_UNKNOWN = 0, /**< Color attribute is unused. */
|
| 305 |
+
NVTX_COLOR_ARGB = 1 /**< An ARGB color is provided. */
|
| 306 |
+
} nvtxColorType_t;
|
| 307 |
+
|
| 308 |
+
/** ---------------------------------------------------------------------------
|
| 309 |
+
* Message Types
|
| 310 |
+
* ------------------------------------------------------------------------- */
|
| 311 |
+
typedef enum nvtxMessageType_t
|
| 312 |
+
{
|
| 313 |
+
NVTX_MESSAGE_UNKNOWN = 0, /**< Message payload is unused. */
|
| 314 |
+
NVTX_MESSAGE_TYPE_ASCII = 1, /**< A character sequence is used as payload. */
|
| 315 |
+
NVTX_MESSAGE_TYPE_UNICODE = 2, /**< A wide character sequence is used as payload. */
|
| 316 |
+
/* NVTX_VERSION_2 */
|
| 317 |
+
NVTX_MESSAGE_TYPE_REGISTERED = 3, /**< A unique string handle that was registered
|
| 318 |
+
with \ref nvtxDomainRegisterStringA() or
|
| 319 |
+
\ref nvtxDomainRegisterStringW(). */
|
| 320 |
+
} nvtxMessageType_t;
|
| 321 |
+
|
| 322 |
+
typedef union nvtxMessageValue_t
|
| 323 |
+
{
|
| 324 |
+
const char* ascii;
|
| 325 |
+
const wchar_t* unicode;
|
| 326 |
+
/* NVTX_VERSION_2 */
|
| 327 |
+
nvtxStringHandle_t registered;
|
| 328 |
+
} nvtxMessageValue_t;
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
/** @} */ /*END defgroup*/
|
| 332 |
+
/* ------------------------------------------------------------------------- */
|
| 333 |
+
/** \brief Force initialization (optional)
|
| 334 |
+
*
|
| 335 |
+
* Force NVTX library to initialize. The first call to any NVTX API function
|
| 336 |
+
* will automatically initialize the entire API. This can make the first call
|
| 337 |
+
* much slower than subsequent calls. In applications where the first call to
|
| 338 |
+
* NVTX may be in a performance-critical section, calling nvtxInitialize before
|
| 339 |
+
* any performance-critical sections will ensure NVTX initialization occurs at
|
| 340 |
+
* an acceptable time. Since nvtxInitialize takes no parameters and has no
|
| 341 |
+
* expected behavior besides initialization, it is convenient to add a call to
|
| 342 |
+
* nvtxInitialize in NVTX-instrumented applications that need to force earlier
|
| 343 |
+
* initialization without changing any other code. For example, if an app's
|
| 344 |
+
* first NVTX call is nvtxDomainCreate, and it is difficult to move that call
|
| 345 |
+
* earlier because the domain handle must be stored in an object only created
|
| 346 |
+
* at that point, adding a call to nvtxInitialize at the top of main() will
|
| 347 |
+
* ensure the later call to nvtxDomainCreate is as fast as possible.
|
| 348 |
+
*
|
| 349 |
+
* \version \NVTX_VERSION_3
|
| 350 |
+
*
|
| 351 |
+
* \param reserved - must be zero or NULL.
|
| 352 |
+
*
|
| 353 |
+
* @{ */
|
| 354 |
+
NVTX_DECLSPEC void NVTX_API nvtxInitialize(const void* reserved);
|
| 355 |
+
/** @} */
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
/** @} */ /*END defgroup*/
|
| 359 |
+
|
| 360 |
+
/* ========================================================================= */
|
| 361 |
+
/** \defgroup EVENT_ATTRIBUTES Event Attributes
|
| 362 |
+
* @{
|
| 363 |
+
*/
|
| 364 |
+
|
| 365 |
+
/** ---------------------------------------------------------------------------
|
| 366 |
+
* Payload Types
|
| 367 |
+
* ------------------------------------------------------------------------- */
|
| 368 |
+
typedef enum nvtxPayloadType_t
|
| 369 |
+
{
|
| 370 |
+
NVTX_PAYLOAD_UNKNOWN = 0, /**< Color payload is unused. */
|
| 371 |
+
NVTX_PAYLOAD_TYPE_UNSIGNED_INT64 = 1, /**< A 64 bit unsigned integer value is used as payload. */
|
| 372 |
+
NVTX_PAYLOAD_TYPE_INT64 = 2, /**< A 64 bit signed integer value is used as payload. */
|
| 373 |
+
NVTX_PAYLOAD_TYPE_DOUBLE = 3, /**< A 64 bit floating point value is used as payload. */
|
| 374 |
+
/* NVTX_VERSION_2 */
|
| 375 |
+
NVTX_PAYLOAD_TYPE_UNSIGNED_INT32 = 4, /**< A 32 bit floating point value is used as payload. */
|
| 376 |
+
NVTX_PAYLOAD_TYPE_INT32 = 5, /**< A 32 bit floating point value is used as payload. */
|
| 377 |
+
NVTX_PAYLOAD_TYPE_FLOAT = 6 /**< A 32 bit floating point value is used as payload. */
|
| 378 |
+
} nvtxPayloadType_t;
|
| 379 |
+
|
| 380 |
+
/** \brief Event Attribute Structure.
|
| 381 |
+
* \anchor EVENT_ATTRIBUTE_STRUCTURE
|
| 382 |
+
*
|
| 383 |
+
* This structure is used to describe the attributes of an event. The layout of
|
| 384 |
+
* the structure is defined by a specific version of the tools extension
|
| 385 |
+
* library and can change between different versions of the Tools Extension
|
| 386 |
+
* library.
|
| 387 |
+
*
|
| 388 |
+
* \par Initializing the Attributes
|
| 389 |
+
*
|
| 390 |
+
* The caller should always perform the following three tasks when using
|
| 391 |
+
* attributes:
|
| 392 |
+
* <ul>
|
| 393 |
+
* <li>Zero the structure
|
| 394 |
+
* <li>Set the version field
|
| 395 |
+
* <li>Set the size field
|
| 396 |
+
* </ul>
|
| 397 |
+
*
|
| 398 |
+
* Zeroing the structure sets all the event attributes types and values
|
| 399 |
+
* to the default value.
|
| 400 |
+
*
|
| 401 |
+
* The version and size field are used by the Tools Extension
|
| 402 |
+
* implementation to handle multiple versions of the attributes structure.
|
| 403 |
+
*
|
| 404 |
+
* It is recommended that the caller use one of the following to methods
|
| 405 |
+
* to initialize the event attributes structure:
|
| 406 |
+
*
|
| 407 |
+
* \par Method 1: Initializing nvtxEventAttributes for future compatibility
|
| 408 |
+
* \code
|
| 409 |
+
* nvtxEventAttributes_t eventAttrib = {0};
|
| 410 |
+
* eventAttrib.version = NVTX_VERSION;
|
| 411 |
+
* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
|
| 412 |
+
* \endcode
|
| 413 |
+
*
|
| 414 |
+
* \par Method 2: Initializing nvtxEventAttributes for a specific version
|
| 415 |
+
* \code
|
| 416 |
+
* nvtxEventAttributes_t eventAttrib = {0};
|
| 417 |
+
* eventAttrib.version = 1;
|
| 418 |
+
* eventAttrib.size = (uint16_t)(sizeof(nvtxEventAttributes_v1));
|
| 419 |
+
* \endcode
|
| 420 |
+
*
|
| 421 |
+
* If the caller uses Method 1 it is critical that the entire binary
|
| 422 |
+
* layout of the structure be configured to 0 so that all fields
|
| 423 |
+
* are initialized to the default value.
|
| 424 |
+
*
|
| 425 |
+
* The caller should either use both NVTX_VERSION and
|
| 426 |
+
* NVTX_EVENT_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values
|
| 427 |
+
* and a versioned type (Method 2). Using a mix of the two methods
|
| 428 |
+
* will likely cause either source level incompatibility or binary
|
| 429 |
+
* incompatibility in the future.
|
| 430 |
+
*
|
| 431 |
+
* \par Settings Attribute Types and Values
|
| 432 |
+
*
|
| 433 |
+
*
|
| 434 |
+
* \par Example:
|
| 435 |
+
* \code
|
| 436 |
+
* // Initialize
|
| 437 |
+
* nvtxEventAttributes_t eventAttrib = {0};
|
| 438 |
+
* eventAttrib.version = NVTX_VERSION;
|
| 439 |
+
* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
|
| 440 |
+
*
|
| 441 |
+
* // Configure the Attributes
|
| 442 |
+
* eventAttrib.colorType = NVTX_COLOR_ARGB;
|
| 443 |
+
* eventAttrib.color = 0xFF880000;
|
| 444 |
+
* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 445 |
+
* eventAttrib.message.ascii = "Example";
|
| 446 |
+
* \endcode
|
| 447 |
+
*
|
| 448 |
+
* In the example the caller does not have to set the value of
|
| 449 |
+
* \ref ::nvtxEventAttributes_v2::category or
|
| 450 |
+
* \ref ::nvtxEventAttributes_v2::payload as these fields were set to
|
| 451 |
+
* the default value by {0}.
|
| 452 |
+
* \sa
|
| 453 |
+
* ::nvtxDomainMarkEx
|
| 454 |
+
* ::nvtxDomainRangeStartEx
|
| 455 |
+
* ::nvtxDomainRangePushEx
|
| 456 |
+
*/
|
| 457 |
+
typedef struct nvtxEventAttributes_v2
|
| 458 |
+
{
|
| 459 |
+
/**
|
| 460 |
+
* \brief Version flag of the structure.
|
| 461 |
+
*
|
| 462 |
+
* Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs
|
| 463 |
+
* supported in this header file. This can optionally be overridden to
|
| 464 |
+
* another version of the tools extension library.
|
| 465 |
+
*/
|
| 466 |
+
uint16_t version;
|
| 467 |
+
|
| 468 |
+
/**
|
| 469 |
+
* \brief Size of the structure.
|
| 470 |
+
*
|
| 471 |
+
* Needs to be set to the size in bytes of the event attribute
|
| 472 |
+
* structure used to specify the event.
|
| 473 |
+
*/
|
| 474 |
+
uint16_t size;
|
| 475 |
+
|
| 476 |
+
/**
|
| 477 |
+
* \brief ID of the category the event is assigned to.
|
| 478 |
+
*
|
| 479 |
+
* A category is a user-controlled ID that can be used to group
|
| 480 |
+
* events. The tool may use category IDs to improve filtering or
|
| 481 |
+
* enable grouping of events in the same category. The functions
|
| 482 |
+
* \ref ::nvtxNameCategoryA or \ref ::nvtxNameCategoryW can be used
|
| 483 |
+
* to name a category.
|
| 484 |
+
*
|
| 485 |
+
* Default Value is 0
|
| 486 |
+
*/
|
| 487 |
+
uint32_t category;
|
| 488 |
+
|
| 489 |
+
/** \brief Color type specified in this attribute structure.
|
| 490 |
+
*
|
| 491 |
+
* Defines the color format of the attribute structure's \ref COLOR_FIELD
|
| 492 |
+
* "color" field.
|
| 493 |
+
*
|
| 494 |
+
* Default Value is NVTX_COLOR_UNKNOWN
|
| 495 |
+
*/
|
| 496 |
+
int32_t colorType; /* nvtxColorType_t */
|
| 497 |
+
|
| 498 |
+
/** \brief Color assigned to this event. \anchor COLOR_FIELD
|
| 499 |
+
*
|
| 500 |
+
* The color that the tool should use to visualize the event.
|
| 501 |
+
*/
|
| 502 |
+
uint32_t color;
|
| 503 |
+
|
| 504 |
+
/**
|
| 505 |
+
* \brief Payload type specified in this attribute structure.
|
| 506 |
+
*
|
| 507 |
+
* Defines the payload format of the attribute structure's \ref PAYLOAD_FIELD
|
| 508 |
+
* "payload" field.
|
| 509 |
+
*
|
| 510 |
+
* Default Value is NVTX_PAYLOAD_UNKNOWN
|
| 511 |
+
*/
|
| 512 |
+
int32_t payloadType; /* nvtxPayloadType_t */
|
| 513 |
+
|
| 514 |
+
int32_t reserved0;
|
| 515 |
+
|
| 516 |
+
/**
|
| 517 |
+
* \brief Payload assigned to this event. \anchor PAYLOAD_FIELD
|
| 518 |
+
*
|
| 519 |
+
* A numerical value that can be used to annotate an event. The tool could
|
| 520 |
+
* use the payload data to reconstruct graphs and diagrams.
|
| 521 |
+
*/
|
| 522 |
+
union payload_t
|
| 523 |
+
{
|
| 524 |
+
uint64_t ullValue;
|
| 525 |
+
int64_t llValue;
|
| 526 |
+
double dValue;
|
| 527 |
+
/* NVTX_VERSION_2 */
|
| 528 |
+
uint32_t uiValue;
|
| 529 |
+
int32_t iValue;
|
| 530 |
+
float fValue;
|
| 531 |
+
} payload;
|
| 532 |
+
|
| 533 |
+
/** \brief Message type specified in this attribute structure.
|
| 534 |
+
*
|
| 535 |
+
* Defines the message format of the attribute structure's \ref MESSAGE_FIELD
|
| 536 |
+
* "message" field.
|
| 537 |
+
*
|
| 538 |
+
* Default Value is NVTX_MESSAGE_UNKNOWN
|
| 539 |
+
*/
|
| 540 |
+
int32_t messageType; /* nvtxMessageType_t */
|
| 541 |
+
|
| 542 |
+
/** \brief Message assigned to this attribute structure. \anchor MESSAGE_FIELD
|
| 543 |
+
*
|
| 544 |
+
* The text message that is attached to an event.
|
| 545 |
+
*/
|
| 546 |
+
nvtxMessageValue_t message;
|
| 547 |
+
|
| 548 |
+
} nvtxEventAttributes_v2;
|
| 549 |
+
|
| 550 |
+
typedef struct nvtxEventAttributes_v2 nvtxEventAttributes_t;
|
| 551 |
+
|
| 552 |
+
/** @} */ /*END defgroup*/
|
| 553 |
+
/* ========================================================================= */
|
| 554 |
+
/** \defgroup MARKERS_AND_RANGES Markers and Ranges
|
| 555 |
+
*
|
| 556 |
+
* See \ref MARKERS_AND_RANGES for more details
|
| 557 |
+
*
|
| 558 |
+
* @{
|
| 559 |
+
*/
|
| 560 |
+
|
| 561 |
+
/** \name Marker */
|
| 562 |
+
|
| 563 |
+
/* ------------------------------------------------------------------------- */
|
| 564 |
+
/** \brief Marks an instantaneous event in the application.
|
| 565 |
+
*
|
| 566 |
+
* A marker can contain a text message or specify additional information
|
| 567 |
+
* using the event attributes structure. These attributes include a text
|
| 568 |
+
* message, color, category, and a payload. Each of the attributes is optional
|
| 569 |
+
* and can only be sent out using the \ref nvtxDomainMarkEx function.
|
| 570 |
+
*
|
| 571 |
+
* nvtxDomainMarkEx(NULL, event) is equivalent to calling
|
| 572 |
+
* nvtxMarkEx(event).
|
| 573 |
+
*
|
| 574 |
+
* \param domain - The domain of scoping the category.
|
| 575 |
+
* \param eventAttrib - The event attribute structure defining the marker's
|
| 576 |
+
* attribute types and attribute values.
|
| 577 |
+
*
|
| 578 |
+
* \sa
|
| 579 |
+
* ::nvtxMarkEx
|
| 580 |
+
*
|
| 581 |
+
* \version \NVTX_VERSION_2
|
| 582 |
+
* @{ */
|
| 583 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainMarkEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
|
| 584 |
+
/** @} */
|
| 585 |
+
|
| 586 |
+
/* ------------------------------------------------------------------------- */
|
| 587 |
+
/** \brief Marks an instantaneous event in the application.
|
| 588 |
+
*
|
| 589 |
+
* A marker can contain a text message or specify additional information
|
| 590 |
+
* using the event attributes structure. These attributes include a text
|
| 591 |
+
* message, color, category, and a payload. Each of the attributes is optional
|
| 592 |
+
* and can only be sent out using the \ref nvtxMarkEx function.
|
| 593 |
+
* If \ref nvtxMarkA or \ref nvtxMarkW are used to specify the marker
|
| 594 |
+
* or if an attribute is unspecified then a default value will be used.
|
| 595 |
+
*
|
| 596 |
+
* \param eventAttrib - The event attribute structure defining the marker's
|
| 597 |
+
* attribute types and attribute values.
|
| 598 |
+
*
|
| 599 |
+
* \par Example:
|
| 600 |
+
* \code
|
| 601 |
+
* // zero the structure
|
| 602 |
+
* nvtxEventAttributes_t eventAttrib = {0};
|
| 603 |
+
* // set the version and the size information
|
| 604 |
+
* eventAttrib.version = NVTX_VERSION;
|
| 605 |
+
* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
|
| 606 |
+
* // configure the attributes. 0 is the default for all attributes.
|
| 607 |
+
* eventAttrib.colorType = NVTX_COLOR_ARGB;
|
| 608 |
+
* eventAttrib.color = 0xFF880000;
|
| 609 |
+
* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 610 |
+
* eventAttrib.message.ascii = "Example nvtxMarkEx";
|
| 611 |
+
* nvtxMarkEx(&eventAttrib);
|
| 612 |
+
* \endcode
|
| 613 |
+
*
|
| 614 |
+
* \sa
|
| 615 |
+
* ::nvtxDomainMarkEx
|
| 616 |
+
*
|
| 617 |
+
* \version \NVTX_VERSION_1
|
| 618 |
+
* @{ */
|
| 619 |
+
NVTX_DECLSPEC void NVTX_API nvtxMarkEx(const nvtxEventAttributes_t* eventAttrib);
|
| 620 |
+
/** @} */
|
| 621 |
+
|
| 622 |
+
/* ------------------------------------------------------------------------- */
|
| 623 |
+
/** \brief Marks an instantaneous event in the application.
|
| 624 |
+
*
|
| 625 |
+
* A marker created using \ref nvtxMarkA or \ref nvtxMarkW contains only a
|
| 626 |
+
* text message.
|
| 627 |
+
*
|
| 628 |
+
* \param message - The message associated to this marker event.
|
| 629 |
+
*
|
| 630 |
+
* \par Example:
|
| 631 |
+
* \code
|
| 632 |
+
* nvtxMarkA("Example nvtxMarkA");
|
| 633 |
+
* nvtxMarkW(L"Example nvtxMarkW");
|
| 634 |
+
* \endcode
|
| 635 |
+
*
|
| 636 |
+
* \sa
|
| 637 |
+
* ::nvtxDomainMarkEx
|
| 638 |
+
* ::nvtxMarkEx
|
| 639 |
+
*
|
| 640 |
+
* \version \NVTX_VERSION_0
|
| 641 |
+
* @{ */
|
| 642 |
+
NVTX_DECLSPEC void NVTX_API nvtxMarkA(const char* message);
|
| 643 |
+
NVTX_DECLSPEC void NVTX_API nvtxMarkW(const wchar_t* message);
|
| 644 |
+
/** @} */
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
/** \name Process Ranges */
|
| 648 |
+
|
| 649 |
+
/* ------------------------------------------------------------------------- */
|
| 650 |
+
/** \brief Starts a process range in a domain.
|
| 651 |
+
*
|
| 652 |
+
* \param domain - The domain of scoping the category.
|
| 653 |
+
* \param eventAttrib - The event attribute structure defining the range's
|
| 654 |
+
* attribute types and attribute values.
|
| 655 |
+
*
|
| 656 |
+
* \return The unique ID used to correlate a pair of Start and End events.
|
| 657 |
+
*
|
| 658 |
+
* \remarks Ranges defined by Start/End can overlap.
|
| 659 |
+
*
|
| 660 |
+
* \par Example:
|
| 661 |
+
* \code
|
| 662 |
+
* nvtxDomainHandle_t domain = nvtxDomainCreateA("my domain");
|
| 663 |
+
* nvtxEventAttributes_t eventAttrib = {0};
|
| 664 |
+
* eventAttrib.version = NVTX_VERSION;
|
| 665 |
+
* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
|
| 666 |
+
* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 667 |
+
* eventAttrib.message.ascii = "my range";
|
| 668 |
+
* nvtxRangeId_t rangeId = nvtxDomainRangeStartEx(&eventAttrib);
|
| 669 |
+
* // ...
|
| 670 |
+
* nvtxDomainRangeEnd(rangeId);
|
| 671 |
+
* \endcode
|
| 672 |
+
*
|
| 673 |
+
* \sa
|
| 674 |
+
* ::nvtxDomainRangeEnd
|
| 675 |
+
*
|
| 676 |
+
* \version \NVTX_VERSION_2
|
| 677 |
+
* @{ */
|
| 678 |
+
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxDomainRangeStartEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
|
| 679 |
+
/** @} */
|
| 680 |
+
|
| 681 |
+
/* ------------------------------------------------------------------------- */
|
| 682 |
+
/** \brief Starts a process range.
|
| 683 |
+
*
|
| 684 |
+
* \param eventAttrib - The event attribute structure defining the range's
|
| 685 |
+
* attribute types and attribute values.
|
| 686 |
+
*
|
| 687 |
+
* \return The unique ID used to correlate a pair of Start and End events.
|
| 688 |
+
*
|
| 689 |
+
* \remarks Ranges defined by Start/End can overlap.
|
| 690 |
+
*
|
| 691 |
+
* \par Example:
|
| 692 |
+
* \code
|
| 693 |
+
* nvtxEventAttributes_t eventAttrib = {0};
|
| 694 |
+
* eventAttrib.version = NVTX_VERSION;
|
| 695 |
+
* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
|
| 696 |
+
* eventAttrib.category = 3;
|
| 697 |
+
* eventAttrib.colorType = NVTX_COLOR_ARGB;
|
| 698 |
+
* eventAttrib.color = 0xFF0088FF;
|
| 699 |
+
* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 700 |
+
* eventAttrib.message.ascii = "Example Range";
|
| 701 |
+
* nvtxRangeId_t rangeId = nvtxRangeStartEx(&eventAttrib);
|
| 702 |
+
* // ...
|
| 703 |
+
* nvtxRangeEnd(rangeId);
|
| 704 |
+
* \endcode
|
| 705 |
+
*
|
| 706 |
+
* \sa
|
| 707 |
+
* ::nvtxRangeEnd
|
| 708 |
+
* ::nvtxDomainRangeStartEx
|
| 709 |
+
*
|
| 710 |
+
* \version \NVTX_VERSION_1
|
| 711 |
+
* @{ */
|
| 712 |
+
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartEx(const nvtxEventAttributes_t* eventAttrib);
|
| 713 |
+
/** @} */
|
| 714 |
+
|
| 715 |
+
/* ------------------------------------------------------------------------- */
|
| 716 |
+
/** \brief Starts a process range.
|
| 717 |
+
*
|
| 718 |
+
* \param message - The event message associated to this range event.
|
| 719 |
+
*
|
| 720 |
+
* \return The unique ID used to correlate a pair of Start and End events.
|
| 721 |
+
*
|
| 722 |
+
* \remarks Ranges defined by Start/End can overlap.
|
| 723 |
+
*
|
| 724 |
+
* \par Example:
|
| 725 |
+
* \code
|
| 726 |
+
* nvtxRangeId_t r1 = nvtxRangeStartA("Range 1");
|
| 727 |
+
* nvtxRangeId_t r2 = nvtxRangeStartW(L"Range 2");
|
| 728 |
+
* nvtxRangeEnd(r1);
|
| 729 |
+
* nvtxRangeEnd(r2);
|
| 730 |
+
* \endcode
|
| 731 |
+
*
|
| 732 |
+
* \sa
|
| 733 |
+
* ::nvtxRangeEnd
|
| 734 |
+
* ::nvtxRangeStartEx
|
| 735 |
+
* ::nvtxDomainRangeStartEx
|
| 736 |
+
*
|
| 737 |
+
* \version \NVTX_VERSION_0
|
| 738 |
+
* @{ */
|
| 739 |
+
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartA(const char* message);
|
| 740 |
+
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartW(const wchar_t* message);
|
| 741 |
+
/** @} */
|
| 742 |
+
|
| 743 |
+
/* ------------------------------------------------------------------------- */
|
| 744 |
+
/** \brief Ends a process range.
|
| 745 |
+
*
|
| 746 |
+
* \param domain - The domain
|
| 747 |
+
* \param id - The correlation ID returned from a nvtxRangeStart call.
|
| 748 |
+
*
|
| 749 |
+
* \remarks This function is offered completeness but is an alias for ::nvtxRangeEnd.
|
| 750 |
+
* It does not need a domain param since that is associated iwth the range ID at ::nvtxDomainRangeStartEx
|
| 751 |
+
*
|
| 752 |
+
* \par Example:
|
| 753 |
+
* \code
|
| 754 |
+
* nvtxDomainHandle_t domain = nvtxDomainCreateA("my domain");
|
| 755 |
+
* nvtxEventAttributes_t eventAttrib = {0};
|
| 756 |
+
* eventAttrib.version = NVTX_VERSION;
|
| 757 |
+
* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
|
| 758 |
+
* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 759 |
+
* eventAttrib.message.ascii = "my range";
|
| 760 |
+
* nvtxRangeId_t rangeId = nvtxDomainRangeStartEx(&eventAttrib);
|
| 761 |
+
* // ...
|
| 762 |
+
* nvtxDomainRangeEnd(rangeId);
|
| 763 |
+
* \endcode
|
| 764 |
+
*
|
| 765 |
+
* \sa
|
| 766 |
+
* ::nvtxDomainRangeStartEx
|
| 767 |
+
*
|
| 768 |
+
* \version \NVTX_VERSION_2
|
| 769 |
+
* @{ */
|
| 770 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainRangeEnd(nvtxDomainHandle_t domain, nvtxRangeId_t id);
|
| 771 |
+
/** @} */
|
| 772 |
+
|
| 773 |
+
/* ------------------------------------------------------------------------- */
|
| 774 |
+
/** \brief Ends a process range.
|
| 775 |
+
*
|
| 776 |
+
* \param id - The correlation ID returned from an nvtxRangeStart call.
|
| 777 |
+
*
|
| 778 |
+
* \sa
|
| 779 |
+
* ::nvtxDomainRangeStartEx
|
| 780 |
+
* ::nvtxRangeStartEx
|
| 781 |
+
* ::nvtxRangeStartA
|
| 782 |
+
* ::nvtxRangeStartW
|
| 783 |
+
*
|
| 784 |
+
* \version \NVTX_VERSION_0
|
| 785 |
+
* @{ */
|
| 786 |
+
NVTX_DECLSPEC void NVTX_API nvtxRangeEnd(nvtxRangeId_t id);
|
| 787 |
+
/** @} */
|
| 788 |
+
|
| 789 |
+
/** \name Thread Ranges */
|
| 790 |
+
|
| 791 |
+
/* ------------------------------------------------------------------------- */
|
| 792 |
+
/** \brief Starts a nested thread range.
|
| 793 |
+
*
|
| 794 |
+
* \param domain - The domain of scoping.
|
| 795 |
+
* \param eventAttrib - The event attribute structure defining the range's
|
| 796 |
+
* attribute types and attribute values.
|
| 797 |
+
*
|
| 798 |
+
* \return The 0 based level of range being started. This value is scoped to the domain.
|
| 799 |
+
* If an error occurs, a negative value is returned.
|
| 800 |
+
*
|
| 801 |
+
* \par Example:
|
| 802 |
+
* \code
|
| 803 |
+
* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
|
| 804 |
+
* nvtxEventAttributes_t eventAttrib = {0};
|
| 805 |
+
* eventAttrib.version = NVTX_VERSION;
|
| 806 |
+
* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
|
| 807 |
+
* eventAttrib.colorType = NVTX_COLOR_ARGB;
|
| 808 |
+
* eventAttrib.color = 0xFFFF0000;
|
| 809 |
+
* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 810 |
+
* eventAttrib.message.ascii = "Level 0";
|
| 811 |
+
* nvtxDomainRangePushEx(domain, &eventAttrib);
|
| 812 |
+
*
|
| 813 |
+
* // Re-use eventAttrib
|
| 814 |
+
* eventAttrib.messageType = NVTX_MESSAGE_TYPE_UNICODE;
|
| 815 |
+
* eventAttrib.message.unicode = L"Level 1";
|
| 816 |
+
* nvtxDomainRangePushEx(domain, &eventAttrib);
|
| 817 |
+
*
|
| 818 |
+
* nvtxDomainRangePop(domain); //level 1
|
| 819 |
+
* nvtxDomainRangePop(domain); //level 0
|
| 820 |
+
* \endcode
|
| 821 |
+
*
|
| 822 |
+
* \sa
|
| 823 |
+
* ::nvtxDomainRangePop
|
| 824 |
+
*
|
| 825 |
+
* \version \NVTX_VERSION_2
|
| 826 |
+
* @{ */
|
| 827 |
+
NVTX_DECLSPEC int NVTX_API nvtxDomainRangePushEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
|
| 828 |
+
/** @} */
|
| 829 |
+
|
| 830 |
+
/* ------------------------------------------------------------------------- */
|
| 831 |
+
/** \brief Starts a nested thread range.
|
| 832 |
+
*
|
| 833 |
+
* \param eventAttrib - The event attribute structure defining the range's
|
| 834 |
+
* attribute types and attribute values.
|
| 835 |
+
*
|
| 836 |
+
* \return The 0 based level of range being started. This level is per domain.
|
| 837 |
+
* If an error occurs a negative value is returned.
|
| 838 |
+
*
|
| 839 |
+
* \par Example:
|
| 840 |
+
* \code
|
| 841 |
+
* nvtxEventAttributes_t eventAttrib = {0};
|
| 842 |
+
* eventAttrib.version = NVTX_VERSION;
|
| 843 |
+
* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
|
| 844 |
+
* eventAttrib.colorType = NVTX_COLOR_ARGB;
|
| 845 |
+
* eventAttrib.color = 0xFFFF0000;
|
| 846 |
+
* eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 847 |
+
* eventAttrib.message.ascii = "Level 0";
|
| 848 |
+
* nvtxRangePushEx(&eventAttrib);
|
| 849 |
+
*
|
| 850 |
+
* // Re-use eventAttrib
|
| 851 |
+
* eventAttrib.messageType = NVTX_MESSAGE_TYPE_UNICODE;
|
| 852 |
+
* eventAttrib.message.unicode = L"Level 1";
|
| 853 |
+
* nvtxRangePushEx(&eventAttrib);
|
| 854 |
+
*
|
| 855 |
+
* nvtxRangePop();
|
| 856 |
+
* nvtxRangePop();
|
| 857 |
+
* \endcode
|
| 858 |
+
*
|
| 859 |
+
* \sa
|
| 860 |
+
* ::nvtxDomainRangePushEx
|
| 861 |
+
* ::nvtxRangePop
|
| 862 |
+
*
|
| 863 |
+
* \version \NVTX_VERSION_1
|
| 864 |
+
* @{ */
|
| 865 |
+
NVTX_DECLSPEC int NVTX_API nvtxRangePushEx(const nvtxEventAttributes_t* eventAttrib);
|
| 866 |
+
/** @} */
|
| 867 |
+
|
| 868 |
+
/* ------------------------------------------------------------------------- */
|
| 869 |
+
/** \brief Starts a nested thread range.
|
| 870 |
+
*
|
| 871 |
+
* \param message - The event message associated to this range event.
|
| 872 |
+
*
|
| 873 |
+
* \return The 0 based level of range being started. If an error occurs a
|
| 874 |
+
* negative value is returned.
|
| 875 |
+
*
|
| 876 |
+
* \par Example:
|
| 877 |
+
* \code
|
| 878 |
+
* nvtxRangePushA("Level 0");
|
| 879 |
+
* nvtxRangePushW(L"Level 1");
|
| 880 |
+
* nvtxRangePop();
|
| 881 |
+
* nvtxRangePop();
|
| 882 |
+
* \endcode
|
| 883 |
+
*
|
| 884 |
+
* \sa
|
| 885 |
+
* ::nvtxDomainRangePushEx
|
| 886 |
+
* ::nvtxRangePop
|
| 887 |
+
*
|
| 888 |
+
* \version \NVTX_VERSION_0
|
| 889 |
+
* @{ */
|
| 890 |
+
NVTX_DECLSPEC int NVTX_API nvtxRangePushA(const char* message);
|
| 891 |
+
NVTX_DECLSPEC int NVTX_API nvtxRangePushW(const wchar_t* message);
|
| 892 |
+
/** @} */
|
| 893 |
+
|
| 894 |
+
|
| 895 |
+
/* ------------------------------------------------------------------------- */
|
| 896 |
+
/** \brief Ends a nested thread range.
|
| 897 |
+
*
|
| 898 |
+
* \return The level of the range being ended. If an error occurs a negative
|
| 899 |
+
* value is returned on the current thread.
|
| 900 |
+
*
|
| 901 |
+
* \par Example:
|
| 902 |
+
* \code
|
| 903 |
+
* nvtxDomainHandle_t domain = nvtxDomainCreate("example library");
|
| 904 |
+
* nvtxDomainRangePushA(domain, "Level 0");
|
| 905 |
+
* nvtxDomainRangePushW(domain, L"Level 1");
|
| 906 |
+
* nvtxDomainRangePop(domain);
|
| 907 |
+
* nvtxDomainRangePop(domain);
|
| 908 |
+
* \endcode
|
| 909 |
+
*
|
| 910 |
+
* \sa
|
| 911 |
+
* ::nvtxRangePushEx
|
| 912 |
+
* ::nvtxRangePushA
|
| 913 |
+
* ::nvtxRangePushW
|
| 914 |
+
*
|
| 915 |
+
* \version \NVTX_VERSION_2
|
| 916 |
+
* @{ */
|
| 917 |
+
NVTX_DECLSPEC int NVTX_API nvtxDomainRangePop(nvtxDomainHandle_t domain);
|
| 918 |
+
/** @} */
|
| 919 |
+
|
| 920 |
+
/* ------------------------------------------------------------------------- */
|
| 921 |
+
/** \brief Ends a nested thread range.
|
| 922 |
+
*
|
| 923 |
+
* \return The level of the range being ended. If an error occurs a negative
|
| 924 |
+
* value is returned on the current thread.
|
| 925 |
+
*
|
| 926 |
+
* \par Example:
|
| 927 |
+
* \code
|
| 928 |
+
* nvtxRangePushA("Level 0");
|
| 929 |
+
* nvtxRangePushW(L"Level 1");
|
| 930 |
+
* nvtxRangePop();
|
| 931 |
+
* nvtxRangePop();
|
| 932 |
+
* \endcode
|
| 933 |
+
*
|
| 934 |
+
* \sa
|
| 935 |
+
* ::nvtxRangePushEx
|
| 936 |
+
* ::nvtxRangePushA
|
| 937 |
+
* ::nvtxRangePushW
|
| 938 |
+
*
|
| 939 |
+
* \version \NVTX_VERSION_0
|
| 940 |
+
* @{ */
|
| 941 |
+
NVTX_DECLSPEC int NVTX_API nvtxRangePop(void);
|
| 942 |
+
/** @} */
|
| 943 |
+
|
| 944 |
+
|
| 945 |
+
/** @} */ /*END defgroup*/
|
| 946 |
+
/* ========================================================================= */
|
| 947 |
+
/** \defgroup RESOURCE_NAMING Resource Naming
|
| 948 |
+
*
|
| 949 |
+
* See \ref RESOURCE_NAMING for more details
|
| 950 |
+
*
|
| 951 |
+
* @{
|
| 952 |
+
*/
|
| 953 |
+
|
| 954 |
+
|
| 955 |
+
/* ------------------------------------------------------------------------- */
|
| 956 |
+
/** \name Functions for Generic Resource Naming*/
|
| 957 |
+
/* ------------------------------------------------------------------------- */
|
| 958 |
+
|
| 959 |
+
/* ------------------------------------------------------------------------- */
|
| 960 |
+
/** \cond SHOW_HIDDEN
|
| 961 |
+
* \brief Resource typing helpers.
|
| 962 |
+
*
|
| 963 |
+
* Classes are used to make it easy to create a series of resource types
|
| 964 |
+
* per API without collisions
|
| 965 |
+
*/
|
| 966 |
+
#define NVTX_RESOURCE_MAKE_TYPE(CLASS, INDEX) ((((uint32_t)(NVTX_RESOURCE_CLASS_ ## CLASS))<<16)|((uint32_t)(INDEX)))
|
| 967 |
+
#define NVTX_RESOURCE_CLASS_GENERIC 1
|
| 968 |
+
/** \endcond */
|
| 969 |
+
|
| 970 |
+
/* ------------------------------------------------------------------------- */
|
| 971 |
+
/** \brief Generic resource type for when a resource class is not available.
|
| 972 |
+
*
|
| 973 |
+
* \sa
|
| 974 |
+
* ::nvtxDomainResourceCreate
|
| 975 |
+
*
|
| 976 |
+
* \version \NVTX_VERSION_2
|
| 977 |
+
*/
|
| 978 |
+
typedef enum nvtxResourceGenericType_t
|
| 979 |
+
{
|
| 980 |
+
NVTX_RESOURCE_TYPE_UNKNOWN = 0,
|
| 981 |
+
NVTX_RESOURCE_TYPE_GENERIC_POINTER = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 1), /**< Generic pointer assumed to have no collisions with other pointers. */
|
| 982 |
+
NVTX_RESOURCE_TYPE_GENERIC_HANDLE = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 2), /**< Generic handle assumed to have no collisions with other handles. */
|
| 983 |
+
NVTX_RESOURCE_TYPE_GENERIC_THREAD_NATIVE = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 3), /**< OS native thread identifier. */
|
| 984 |
+
NVTX_RESOURCE_TYPE_GENERIC_THREAD_POSIX = NVTX_RESOURCE_MAKE_TYPE(GENERIC, 4) /**< POSIX pthread identifier. */
|
| 985 |
+
} nvtxResourceGenericType_t;
|
| 986 |
+
|
| 987 |
+
|
| 988 |
+
|
| 989 |
+
/** \brief Resource Attribute Structure.
|
| 990 |
+
* \anchor RESOURCE_ATTRIBUTE_STRUCTURE
|
| 991 |
+
*
|
| 992 |
+
* This structure is used to describe the attributes of a resource. The layout of
|
| 993 |
+
* the structure is defined by a specific version of the tools extension
|
| 994 |
+
* library and can change between different versions of the Tools Extension
|
| 995 |
+
* library.
|
| 996 |
+
*
|
| 997 |
+
* \par Initializing the Attributes
|
| 998 |
+
*
|
| 999 |
+
* The caller should always perform the following three tasks when using
|
| 1000 |
+
* attributes:
|
| 1001 |
+
* <ul>
|
| 1002 |
+
* <li>Zero the structure
|
| 1003 |
+
* <li>Set the version field
|
| 1004 |
+
* <li>Set the size field
|
| 1005 |
+
* </ul>
|
| 1006 |
+
*
|
| 1007 |
+
* Zeroing the structure sets all the resource attributes types and values
|
| 1008 |
+
* to the default value.
|
| 1009 |
+
*
|
| 1010 |
+
* The version and size field are used by the Tools Extension
|
| 1011 |
+
* implementation to handle multiple versions of the attributes structure.
|
| 1012 |
+
*
|
| 1013 |
+
* It is recommended that the caller use one of the following to methods
|
| 1014 |
+
* to initialize the event attributes structure:
|
| 1015 |
+
*
|
| 1016 |
+
* \par Method 1: Initializing nvtxEventAttributes for future compatibility
|
| 1017 |
+
* \code
|
| 1018 |
+
* nvtxResourceAttributes_t attribs = {0};
|
| 1019 |
+
* attribs.version = NVTX_VERSION;
|
| 1020 |
+
* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
|
| 1021 |
+
* \endcode
|
| 1022 |
+
*
|
| 1023 |
+
* \par Method 2: Initializing nvtxEventAttributes for a specific version
|
| 1024 |
+
* \code
|
| 1025 |
+
* nvtxResourceAttributes_v0 attribs = {0};
|
| 1026 |
+
* attribs.version = 2;
|
| 1027 |
+
* attribs.size = (uint16_t)(sizeof(nvtxResourceAttributes_v0));
|
| 1028 |
+
* \endcode
|
| 1029 |
+
*
|
| 1030 |
+
* If the caller uses Method 1 it is critical that the entire binary
|
| 1031 |
+
* layout of the structure be configured to 0 so that all fields
|
| 1032 |
+
* are initialized to the default value.
|
| 1033 |
+
*
|
| 1034 |
+
* The caller should either use both NVTX_VERSION and
|
| 1035 |
+
* NVTX_RESOURCE_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values
|
| 1036 |
+
* and a versioned type (Method 2). Using a mix of the two methods
|
| 1037 |
+
* will likely cause either source level incompatibility or binary
|
| 1038 |
+
* incompatibility in the future.
|
| 1039 |
+
*
|
| 1040 |
+
* \par Settings Attribute Types and Values
|
| 1041 |
+
*
|
| 1042 |
+
*
|
| 1043 |
+
* \par Example:
|
| 1044 |
+
* \code
|
| 1045 |
+
* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
|
| 1046 |
+
*
|
| 1047 |
+
* // Initialize
|
| 1048 |
+
* nvtxResourceAttributes_t attribs = {0};
|
| 1049 |
+
* attribs.version = NVTX_VERSION;
|
| 1050 |
+
* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
|
| 1051 |
+
*
|
| 1052 |
+
* // Configure the Attributes
|
| 1053 |
+
* attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
|
| 1054 |
+
* attribs.identifier.pValue = (const void*)pMutex;
|
| 1055 |
+
* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 1056 |
+
* attribs.message.ascii = "Single thread access to database.";
|
| 1057 |
+
*
|
| 1058 |
+
* nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, attribs);
|
| 1059 |
+
* \endcode
|
| 1060 |
+
*
|
| 1061 |
+
* \sa
|
| 1062 |
+
* ::nvtxDomainResourceCreate
|
| 1063 |
+
*/
|
| 1064 |
+
typedef struct nvtxResourceAttributes_v0
|
| 1065 |
+
{
|
| 1066 |
+
/**
|
| 1067 |
+
* \brief Version flag of the structure.
|
| 1068 |
+
*
|
| 1069 |
+
* Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs
|
| 1070 |
+
* supported in this header file. This can optionally be overridden to
|
| 1071 |
+
* another version of the tools extension library.
|
| 1072 |
+
*/
|
| 1073 |
+
uint16_t version;
|
| 1074 |
+
|
| 1075 |
+
/**
|
| 1076 |
+
* \brief Size of the structure.
|
| 1077 |
+
*
|
| 1078 |
+
* Needs to be set to the size in bytes of this attribute
|
| 1079 |
+
* structure.
|
| 1080 |
+
*/
|
| 1081 |
+
uint16_t size;
|
| 1082 |
+
|
| 1083 |
+
/**
|
| 1084 |
+
* \brief Identifier type specifies how to interpret the identifier field
|
| 1085 |
+
*
|
| 1086 |
+
* Defines the identifier format of the attribute structure's \ref RESOURCE_IDENTIFIER_FIELD
|
| 1087 |
+
* "identifier" field.
|
| 1088 |
+
*
|
| 1089 |
+
* Default Value is NVTX_RESOURCE_TYPE_UNKNOWN
|
| 1090 |
+
*/
|
| 1091 |
+
int32_t identifierType; /* values from enums following the pattern nvtxResource[name]Type_t */
|
| 1092 |
+
|
| 1093 |
+
/**
|
| 1094 |
+
* \brief Identifier for the resource.
|
| 1095 |
+
* \anchor RESOURCE_IDENTIFIER_FIELD
|
| 1096 |
+
*
|
| 1097 |
+
* An identifier may be a pointer or a handle to an OS or middleware API object.
|
| 1098 |
+
* The resource type will assist in avoiding collisions where handles values may collide.
|
| 1099 |
+
*/
|
| 1100 |
+
union identifier_t
|
| 1101 |
+
{
|
| 1102 |
+
const void* pValue;
|
| 1103 |
+
uint64_t ullValue;
|
| 1104 |
+
} identifier;
|
| 1105 |
+
|
| 1106 |
+
/** \brief Message type specified in this attribute structure.
|
| 1107 |
+
*
|
| 1108 |
+
* Defines the message format of the attribute structure's \ref RESOURCE_MESSAGE_FIELD
|
| 1109 |
+
* "message" field.
|
| 1110 |
+
*
|
| 1111 |
+
* Default Value is NVTX_MESSAGE_UNKNOWN
|
| 1112 |
+
*/
|
| 1113 |
+
int32_t messageType; /* nvtxMessageType_t */
|
| 1114 |
+
|
| 1115 |
+
/** \brief Message assigned to this attribute structure. \anchor RESOURCE_MESSAGE_FIELD
|
| 1116 |
+
*
|
| 1117 |
+
* The text message that is attached to a resource.
|
| 1118 |
+
*/
|
| 1119 |
+
nvtxMessageValue_t message;
|
| 1120 |
+
|
| 1121 |
+
} nvtxResourceAttributes_v0;
|
| 1122 |
+
|
| 1123 |
+
typedef struct nvtxResourceAttributes_v0 nvtxResourceAttributes_t;
|
| 1124 |
+
|
| 1125 |
+
/* \cond SHOW_HIDDEN
|
| 1126 |
+
* \version \NVTX_VERSION_2
|
| 1127 |
+
*/
|
| 1128 |
+
#define NVTX_RESOURCE_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxResourceAttributes_v0) ) )
|
| 1129 |
+
typedef struct nvtxResourceHandle* nvtxResourceHandle_t;
|
| 1130 |
+
/** \endcond */
|
| 1131 |
+
|
| 1132 |
+
|
| 1133 |
+
|
| 1134 |
+
/* ------------------------------------------------------------------------- */
|
| 1135 |
+
/** \brief Create a resource object to track and associate data with OS and middleware objects
|
| 1136 |
+
*
|
| 1137 |
+
* Allows users to associate an API handle or pointer with a user-provided name.
|
| 1138 |
+
*
|
| 1139 |
+
*
|
| 1140 |
+
* \param domain - Domain to own the resource object
|
| 1141 |
+
* \param attribs - Attributes to be associated with the resource
|
| 1142 |
+
*
|
| 1143 |
+
* \return A handle that represents the newly created resource object.
|
| 1144 |
+
*
|
| 1145 |
+
* \par Example:
|
| 1146 |
+
* \code
|
| 1147 |
+
* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
|
| 1148 |
+
* nvtxResourceAttributes_t attribs = {0};
|
| 1149 |
+
* attribs.version = NVTX_VERSION;
|
| 1150 |
+
* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
|
| 1151 |
+
* attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
|
| 1152 |
+
* attribs.identifier.pValue = (const void*)pMutex;
|
| 1153 |
+
* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 1154 |
+
* attribs.message.ascii = "Single thread access to database.";
|
| 1155 |
+
* nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, attribs);
|
| 1156 |
+
* \endcode
|
| 1157 |
+
*
|
| 1158 |
+
* \sa
|
| 1159 |
+
* ::nvtxResourceAttributes_t
|
| 1160 |
+
* ::nvtxDomainResourceDestroy
|
| 1161 |
+
*
|
| 1162 |
+
* \version \NVTX_VERSION_2
|
| 1163 |
+
* @{ */
|
| 1164 |
+
NVTX_DECLSPEC nvtxResourceHandle_t NVTX_API nvtxDomainResourceCreate(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs);
|
| 1165 |
+
/** @} */
|
| 1166 |
+
|
| 1167 |
+
/* ------------------------------------------------------------------------- */
|
| 1168 |
+
/** \brief Destroy a resource object to track and associate data with OS and middleware objects
|
| 1169 |
+
*
|
| 1170 |
+
* Allows users to associate an API handle or pointer with a user-provided name.
|
| 1171 |
+
*
|
| 1172 |
+
* \param resource - Handle to the resource in which to operate.
|
| 1173 |
+
*
|
| 1174 |
+
* \par Example:
|
| 1175 |
+
* \code
|
| 1176 |
+
* nvtxDomainHandle_t domain = nvtxDomainCreateA("example domain");
|
| 1177 |
+
* nvtxResourceAttributes_t attribs = {0};
|
| 1178 |
+
* attribs.version = NVTX_VERSION;
|
| 1179 |
+
* attribs.size = NVTX_RESOURCE_ATTRIB_STRUCT_SIZE;
|
| 1180 |
+
* attribs.identifierType = NVTX_RESOURCE_TYPE_GENERIC_POINTER;
|
| 1181 |
+
* attribs.identifier.pValue = (const void*)pMutex;
|
| 1182 |
+
* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 1183 |
+
* attribs.message.ascii = "Single thread access to database.";
|
| 1184 |
+
* nvtxResourceHandle_t handle = nvtxDomainResourceCreate(domain, attribs);
|
| 1185 |
+
* nvtxDomainResourceDestroy(handle);
|
| 1186 |
+
* \endcode
|
| 1187 |
+
*
|
| 1188 |
+
* \sa
|
| 1189 |
+
* ::nvtxDomainResourceCreate
|
| 1190 |
+
*
|
| 1191 |
+
* \version \NVTX_VERSION_2
|
| 1192 |
+
* @{ */
|
| 1193 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainResourceDestroy(nvtxResourceHandle_t resource);
|
| 1194 |
+
/** @} */
|
| 1195 |
+
|
| 1196 |
+
|
| 1197 |
+
/** \name Functions for NVTX Category Naming*/
|
| 1198 |
+
|
| 1199 |
+
/* ------------------------------------------------------------------------- */
|
| 1200 |
+
/**
|
| 1201 |
+
* \brief Annotate an NVTX category used within a domain.
|
| 1202 |
+
*
|
| 1203 |
+
* Categories are used to group sets of events. Each category is identified
|
| 1204 |
+
* through a unique ID and that ID is passed into any of the marker/range
|
| 1205 |
+
* events to assign that event to a specific category. The nvtxDomainNameCategory
|
| 1206 |
+
* function calls allow the user to assign a name to a category ID that is
|
| 1207 |
+
* specific to the domain.
|
| 1208 |
+
*
|
| 1209 |
+
* nvtxDomainNameCategory(NULL, category, name) is equivalent to calling
|
| 1210 |
+
* nvtxNameCategory(category, name).
|
| 1211 |
+
*
|
| 1212 |
+
* \param domain - The domain of scoping the category.
|
| 1213 |
+
* \param category - The category ID to name.
|
| 1214 |
+
* \param name - The name of the category.
|
| 1215 |
+
*
|
| 1216 |
+
* \remarks The category names are tracked per domain.
|
| 1217 |
+
*
|
| 1218 |
+
* \par Example:
|
| 1219 |
+
* \code
|
| 1220 |
+
* nvtxDomainHandle_t domain = nvtxDomainCreateA("example");
|
| 1221 |
+
* nvtxDomainNameCategoryA(domain, 1, "Memory Allocation");
|
| 1222 |
+
* nvtxDomainNameCategoryW(domain, 2, L"Memory Transfer");
|
| 1223 |
+
* \endcode
|
| 1224 |
+
*
|
| 1225 |
+
* \version \NVTX_VERSION_2
|
| 1226 |
+
* @{ */
|
| 1227 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryA(nvtxDomainHandle_t domain, uint32_t category, const char* name);
|
| 1228 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryW(nvtxDomainHandle_t domain, uint32_t category, const wchar_t* name);
|
| 1229 |
+
/** @} */
|
| 1230 |
+
|
| 1231 |
+
/** \brief Annotate an NVTX category.
|
| 1232 |
+
*
|
| 1233 |
+
* Categories are used to group sets of events. Each category is identified
|
| 1234 |
+
* through a unique ID and that ID is passed into any of the marker/range
|
| 1235 |
+
* events to assign that event to a specific category. The nvtxNameCategory
|
| 1236 |
+
* function calls allow the user to assign a name to a category ID.
|
| 1237 |
+
*
|
| 1238 |
+
* \param category - The category ID to name.
|
| 1239 |
+
* \param name - The name of the category.
|
| 1240 |
+
*
|
| 1241 |
+
* \remarks The category names are tracked per process.
|
| 1242 |
+
*
|
| 1243 |
+
* \par Example:
|
| 1244 |
+
* \code
|
| 1245 |
+
* nvtxNameCategory(1, "Memory Allocation");
|
| 1246 |
+
* nvtxNameCategory(2, "Memory Transfer");
|
| 1247 |
+
* nvtxNameCategory(3, "Memory Object Lifetime");
|
| 1248 |
+
* \endcode
|
| 1249 |
+
*
|
| 1250 |
+
* \version \NVTX_VERSION_1
|
| 1251 |
+
* @{ */
|
| 1252 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCategoryA(uint32_t category, const char* name);
|
| 1253 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCategoryW(uint32_t category, const wchar_t* name);
|
| 1254 |
+
/** @} */
|
| 1255 |
+
|
| 1256 |
+
/** \name Functions for OS Threads Naming*/
|
| 1257 |
+
|
| 1258 |
+
/* ------------------------------------------------------------------------- */
|
| 1259 |
+
/** \brief Annotate an OS thread.
|
| 1260 |
+
*
|
| 1261 |
+
* Allows the user to name an active thread of the current process. If an
|
| 1262 |
+
* invalid thread ID is provided or a thread ID from a different process is
|
| 1263 |
+
* used the behavior of the tool is implementation dependent.
|
| 1264 |
+
*
|
| 1265 |
+
* Tools expect thread ID to be a number that uniquely identifies the thread
|
| 1266 |
+
* at the time of the call. Note that a thread's ID can be reused after
|
| 1267 |
+
* it is destroyed. Tools may choose how to handle aliasing of thread IDs.
|
| 1268 |
+
*
|
| 1269 |
+
* POSIX pthread_t type returned by pthread_self() may not comply with these
|
| 1270 |
+
* expectations. Please use OS-specific thread ID instead of pthread_t.
|
| 1271 |
+
*
|
| 1272 |
+
* The thread name is associated to the default domain. To support domains
|
| 1273 |
+
* use resource objects via ::nvtxDomainResourceCreate.
|
| 1274 |
+
*
|
| 1275 |
+
* \param threadId - The ID of the thread to name.
|
| 1276 |
+
* \param name - The name of the thread.
|
| 1277 |
+
*
|
| 1278 |
+
* \par Examples:
|
| 1279 |
+
* MS Windows:
|
| 1280 |
+
* \code
|
| 1281 |
+
* #include <windows.h>
|
| 1282 |
+
* nvtxNameOsThread(GetCurrentThreadId(), "Current thread");
|
| 1283 |
+
* nvtxNameOsThread(GetThreadId(SomeThreadHandle), "Other thread");
|
| 1284 |
+
* \endcode
|
| 1285 |
+
*
|
| 1286 |
+
* Android:
|
| 1287 |
+
* \code
|
| 1288 |
+
* #include <unistd.h>
|
| 1289 |
+
* nvtxNameOsThreadA(gettid(), "Current thread");
|
| 1290 |
+
* nvtxNameOsThreadA(getpid(), "Main thread");
|
| 1291 |
+
* \endcode
|
| 1292 |
+
*
|
| 1293 |
+
* Linux:
|
| 1294 |
+
* \code
|
| 1295 |
+
* #include <sys/syscall.h>
|
| 1296 |
+
* nvtxNameOsThreadA(syscall(SYS_gettid), "Current thread");
|
| 1297 |
+
* \endcode
|
| 1298 |
+
* \code
|
| 1299 |
+
* #include <unistd.h>
|
| 1300 |
+
* nvtxNameOsThreadA(getpid(), "Main thread");
|
| 1301 |
+
* \endcode
|
| 1302 |
+
*
|
| 1303 |
+
* OS X:
|
| 1304 |
+
* \code
|
| 1305 |
+
* #include <sys/syscall.h>
|
| 1306 |
+
* nvtxNameOsThreadA(syscall(SYS_thread_selfid), "Current thread");
|
| 1307 |
+
* \endcode
|
| 1308 |
+
* \code
|
| 1309 |
+
* #include <pthread.h>
|
| 1310 |
+
* __uint64_t id;
|
| 1311 |
+
* pthread_threadid_np(pthread_self(), &id);
|
| 1312 |
+
* nvtxNameOsThreadA(id, "Current thread");
|
| 1313 |
+
* pthread_threadid_np(somePThreadId, &id);
|
| 1314 |
+
* nvtxNameOsThreadA(id, "Other thread");
|
| 1315 |
+
* \endcode
|
| 1316 |
+
*
|
| 1317 |
+
* \version \NVTX_VERSION_1
|
| 1318 |
+
* @{ */
|
| 1319 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadA(uint32_t threadId, const char* name);
|
| 1320 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadW(uint32_t threadId, const wchar_t* name);
|
| 1321 |
+
/** @} */
|
| 1322 |
+
|
| 1323 |
+
|
| 1324 |
+
/** @} */ /*END defgroup*/
|
| 1325 |
+
/* ========================================================================= */
|
| 1326 |
+
/** \defgroup STRING_REGISTRATION String Registration
|
| 1327 |
+
*
|
| 1328 |
+
* Registered strings are intended to increase performance by lowering instrumentation
|
| 1329 |
+
* overhead. String may be registered once and the handle may be passed in place of
|
| 1330 |
+
* a string where an the APIs may allow.
|
| 1331 |
+
*
|
| 1332 |
+
* See \ref STRING_REGISTRATION for more details
|
| 1333 |
+
*
|
| 1334 |
+
* @{
|
| 1335 |
+
*/
|
| 1336 |
+
|
| 1337 |
+
/* ------------------------------------------------------------------------- */
|
| 1338 |
+
/** \brief Register a string.
|
| 1339 |
+
|
| 1340 |
+
* Registers an immutable string with NVTX. Once registered the pointer used
|
| 1341 |
+
* to register the domain name can be used in nvtxEventAttributes_t
|
| 1342 |
+
* \ref MESSAGE_FIELD. This allows NVTX implementation to skip copying the
|
| 1343 |
+
* contents of the message on each event invocation.
|
| 1344 |
+
*
|
| 1345 |
+
* String registration is an optimization. It is recommended to use string
|
| 1346 |
+
* registration if the string will be passed to an event many times.
|
| 1347 |
+
*
|
| 1348 |
+
* String are not unregistered, except that by unregistering the entire domain
|
| 1349 |
+
*
|
| 1350 |
+
* \param domain - Domain handle. If NULL then the global domain is used.
|
| 1351 |
+
* \param string - A unique pointer to a sequence of characters.
|
| 1352 |
+
*
|
| 1353 |
+
* \return A handle representing the registered string.
|
| 1354 |
+
*
|
| 1355 |
+
* \par Example:
|
| 1356 |
+
* \code
|
| 1357 |
+
* nvtxDomainCreateA("com.nvidia.nvtx.example");
|
| 1358 |
+
* nvtxStringHandle_t message = nvtxDomainRegisterStringA(domain, "registered string");
|
| 1359 |
+
* nvtxEventAttributes_t eventAttrib = {0};
|
| 1360 |
+
* eventAttrib.version = NVTX_VERSION;
|
| 1361 |
+
* eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
|
| 1362 |
+
* eventAttrib.messageType = NVTX_MESSAGE_TYPE_REGISTERED;
|
| 1363 |
+
* eventAttrib.message.registered = message;
|
| 1364 |
+
* \endcode
|
| 1365 |
+
*
|
| 1366 |
+
* \version \NVTX_VERSION_2
|
| 1367 |
+
* @{ */
|
| 1368 |
+
NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringA(nvtxDomainHandle_t domain, const char* string);
|
| 1369 |
+
NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringW(nvtxDomainHandle_t domain, const wchar_t* string);
|
| 1370 |
+
/** @} */
|
| 1371 |
+
|
| 1372 |
+
/** @} */ /*END defgroup*/
|
| 1373 |
+
/* ========================================================================= */
|
| 1374 |
+
/** \defgroup DOMAINS Domains
|
| 1375 |
+
*
|
| 1376 |
+
* Domains are used to group events to a developer defined scope. Middleware
|
| 1377 |
+
* vendors may also scope their own events to avoid collisions with the
|
| 1378 |
+
* the application developer's events, so that the application developer may
|
| 1379 |
+
* inspect both parts and easily differentiate or filter them. By default
|
| 1380 |
+
* all events are scoped to a global domain where NULL is provided or when
|
| 1381 |
+
* using APIs provided b versions of NVTX below v2
|
| 1382 |
+
*
|
| 1383 |
+
* Domains are intended to be typically long lived objects with the intention
|
| 1384 |
+
* of logically separating events of large modules from each other such as
|
| 1385 |
+
* middleware libraries from each other and the main application.
|
| 1386 |
+
*
|
| 1387 |
+
* See \ref DOMAINS for more details
|
| 1388 |
+
*
|
| 1389 |
+
* @{
|
| 1390 |
+
*/
|
| 1391 |
+
|
| 1392 |
+
/* ------------------------------------------------------------------------- */
|
| 1393 |
+
/** \brief Register a NVTX domain.
|
| 1394 |
+
*
|
| 1395 |
+
* Domains are used to scope annotations. All NVTX_VERSION_0 and NVTX_VERSION_1
|
| 1396 |
+
* annotations are scoped to the global domain. The function nvtxDomainCreate
|
| 1397 |
+
* creates a new named domain.
|
| 1398 |
+
*
|
| 1399 |
+
* Each domain maintains its own nvtxRangePush and nvtxRangePop stack.
|
| 1400 |
+
*
|
| 1401 |
+
* \param name - A unique string representing the domain.
|
| 1402 |
+
*
|
| 1403 |
+
* \return A handle representing the domain.
|
| 1404 |
+
*
|
| 1405 |
+
* \par Example:
|
| 1406 |
+
* \code
|
| 1407 |
+
* nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example");
|
| 1408 |
+
*
|
| 1409 |
+
* nvtxMarkA("nvtxMarkA to global domain");
|
| 1410 |
+
*
|
| 1411 |
+
* nvtxEventAttributes_t eventAttrib1 = {0};
|
| 1412 |
+
* eventAttrib1.version = NVTX_VERSION;
|
| 1413 |
+
* eventAttrib1.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
|
| 1414 |
+
* eventAttrib1.message.ascii = "nvtxDomainMarkEx to global domain";
|
| 1415 |
+
* nvtxDomainMarkEx(NULL, &eventAttrib1);
|
| 1416 |
+
*
|
| 1417 |
+
* nvtxEventAttributes_t eventAttrib2 = {0};
|
| 1418 |
+
* eventAttrib2.version = NVTX_VERSION;
|
| 1419 |
+
* eventAttrib2.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
|
| 1420 |
+
* eventAttrib2.message.ascii = "nvtxDomainMarkEx to com.nvidia.nvtx.example";
|
| 1421 |
+
* nvtxDomainMarkEx(domain, &eventAttrib2);
|
| 1422 |
+
* nvtxDomainDestroy(domain);
|
| 1423 |
+
* \endcode
|
| 1424 |
+
*
|
| 1425 |
+
* \sa
|
| 1426 |
+
* ::nvtxDomainDestroy
|
| 1427 |
+
*
|
| 1428 |
+
* \version \NVTX_VERSION_2
|
| 1429 |
+
* @{ */
|
| 1430 |
+
NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateA(const char* name);
|
| 1431 |
+
NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateW(const wchar_t* name);
|
| 1432 |
+
/** @} */
|
| 1433 |
+
|
| 1434 |
+
/* ------------------------------------------------------------------------- */
|
| 1435 |
+
/** \brief Unregister a NVTX domain.
|
| 1436 |
+
*
|
| 1437 |
+
* Unregisters the domain handle and frees all domain specific resources.
|
| 1438 |
+
*
|
| 1439 |
+
* \param domain - the domain handle
|
| 1440 |
+
*
|
| 1441 |
+
* \par Example:
|
| 1442 |
+
* \code
|
| 1443 |
+
* nvtxDomainHandle_t domain = nvtxDomainCreateA("com.nvidia.nvtx.example");
|
| 1444 |
+
* nvtxDomainDestroy(domain);
|
| 1445 |
+
* \endcode
|
| 1446 |
+
*
|
| 1447 |
+
* \sa
|
| 1448 |
+
* ::nvtxDomainCreateA
|
| 1449 |
+
* ::nvtxDomainCreateW
|
| 1450 |
+
*
|
| 1451 |
+
* \version \NVTX_VERSION_2
|
| 1452 |
+
* @{ */
|
| 1453 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainDestroy(nvtxDomainHandle_t domain);
|
| 1454 |
+
/** @} */
|
| 1455 |
+
|
| 1456 |
+
|
| 1457 |
+
/** @} */ /*END defgroup*/
|
| 1458 |
+
/* ========================================================================= */
|
| 1459 |
+
/** \cond SHOW_HIDDEN */
|
| 1460 |
+
|
| 1461 |
+
#ifdef UNICODE
|
| 1462 |
+
#define nvtxMark nvtxMarkW
|
| 1463 |
+
#define nvtxRangeStart nvtxRangeStartW
|
| 1464 |
+
#define nvtxRangePush nvtxRangePushW
|
| 1465 |
+
#define nvtxNameCategory nvtxNameCategoryW
|
| 1466 |
+
#define nvtxNameOsThread nvtxNameOsThreadW
|
| 1467 |
+
/* NVTX_VERSION_2 */
|
| 1468 |
+
#define nvtxDomainCreate nvtxDomainCreateW
|
| 1469 |
+
#define nvtxDomainRegisterString nvtxDomainRegisterStringW
|
| 1470 |
+
#define nvtxDomainNameCategory nvtxDomainNameCategoryW
|
| 1471 |
+
#else
|
| 1472 |
+
#define nvtxMark nvtxMarkA
|
| 1473 |
+
#define nvtxRangeStart nvtxRangeStartA
|
| 1474 |
+
#define nvtxRangePush nvtxRangePushA
|
| 1475 |
+
#define nvtxNameCategory nvtxNameCategoryA
|
| 1476 |
+
#define nvtxNameOsThread nvtxNameOsThreadA
|
| 1477 |
+
/* NVTX_VERSION_2 */
|
| 1478 |
+
#define nvtxDomainCreate nvtxDomainCreateA
|
| 1479 |
+
#define nvtxDomainRegisterString nvtxDomainRegisterStringA
|
| 1480 |
+
#define nvtxDomainNameCategory nvtxDomainNameCategoryA
|
| 1481 |
+
#endif
|
| 1482 |
+
|
| 1483 |
+
/** \endcond */
|
| 1484 |
+
|
| 1485 |
+
#ifdef __cplusplus
|
| 1486 |
+
} /* extern "C" */
|
| 1487 |
+
#endif /* __cplusplus */
|
| 1488 |
+
|
| 1489 |
+
#define NVTX_IMPL_GUARD /* Ensure other headers cannot included directly */
|
| 1490 |
+
|
| 1491 |
+
#include "nvtxDetail/nvtxTypes.h"
|
| 1492 |
+
|
| 1493 |
+
#ifndef NVTX_NO_IMPL
|
| 1494 |
+
#include "nvtxDetail/nvtxImpl.h"
|
| 1495 |
+
#endif /*NVTX_NO_IMPL*/
|
| 1496 |
+
|
| 1497 |
+
#undef NVTX_IMPL_GUARD
|
| 1498 |
+
|
| 1499 |
+
#endif /* !defined(NVTX_VERSION) */
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtCuda.h
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO USER:
|
| 5 |
+
*
|
| 6 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 11 |
+
* of a form of NVIDIA software license agreement.
|
| 12 |
+
*
|
| 13 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 14 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 15 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 16 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 17 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 18 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 19 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 20 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 21 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 22 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 23 |
+
*
|
| 24 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 25 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 26 |
+
* "commercial computer software" and "commercial computer software
|
| 27 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 28 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 29 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 30 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 31 |
+
* source code with only those rights set forth herein.
|
| 32 |
+
*
|
| 33 |
+
* Any use of this source code in individual and commercial software must
|
| 34 |
+
* include, in the user documentation and internal comments to the code,
|
| 35 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 36 |
+
*/
|
| 37 |
+
|
| 38 |
+
#include "nvToolsExt.h"
|
| 39 |
+
|
| 40 |
+
#include "cuda.h"
|
| 41 |
+
|
| 42 |
+
#ifndef NVTOOLSEXT_CUDA_V3
|
| 43 |
+
#define NVTOOLSEXT_CUDA_V3
|
| 44 |
+
|
| 45 |
+
#ifdef __cplusplus
|
| 46 |
+
extern "C" {
|
| 47 |
+
#endif /* __cplusplus */
|
| 48 |
+
|
| 49 |
+
/* ========================================================================= */
|
| 50 |
+
/** \name Functions for CUDA Resource Naming
|
| 51 |
+
*/
|
| 52 |
+
/** \addtogroup RESOURCE_NAMING
|
| 53 |
+
* \section RESOURCE_NAMING_CUDA CUDA Resource Naming
|
| 54 |
+
*
|
| 55 |
+
* This section covers the API functions that allow to annotate CUDA resources
|
| 56 |
+
* with user-provided names.
|
| 57 |
+
*
|
| 58 |
+
* @{
|
| 59 |
+
*/
|
| 60 |
+
|
| 61 |
+
/* ------------------------------------------------------------------------- */
|
| 62 |
+
/* \cond SHOW_HIDDEN
|
| 63 |
+
* \brief Used to build a non-colliding value for resource types separated class
|
| 64 |
+
* \version \NVTX_VERSION_2
|
| 65 |
+
*/
|
| 66 |
+
#define NVTX_RESOURCE_CLASS_CUDA 4
|
| 67 |
+
/** \endcond */
|
| 68 |
+
|
| 69 |
+
/* ------------------------------------------------------------------------- */
|
| 70 |
+
/** \brief Resource types for CUDA
|
| 71 |
+
*/
|
| 72 |
+
typedef enum nvtxResourceCUDAType_t
|
| 73 |
+
{
|
| 74 |
+
NVTX_RESOURCE_TYPE_CUDA_DEVICE = NVTX_RESOURCE_MAKE_TYPE(CUDA, 1), /* CUdevice */
|
| 75 |
+
NVTX_RESOURCE_TYPE_CUDA_CONTEXT = NVTX_RESOURCE_MAKE_TYPE(CUDA, 2), /* CUcontext */
|
| 76 |
+
NVTX_RESOURCE_TYPE_CUDA_STREAM = NVTX_RESOURCE_MAKE_TYPE(CUDA, 3), /* CUstream */
|
| 77 |
+
NVTX_RESOURCE_TYPE_CUDA_EVENT = NVTX_RESOURCE_MAKE_TYPE(CUDA, 4), /* CUevent */
|
| 78 |
+
} nvtxResourceCUDAType_t;
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
/* ------------------------------------------------------------------------- */
|
| 82 |
+
/** \brief Annotates a CUDA device.
|
| 83 |
+
*
|
| 84 |
+
* Allows the user to associate a CUDA device with a user-provided name.
|
| 85 |
+
*
|
| 86 |
+
* \param device - The handle of the CUDA device to name.
|
| 87 |
+
* \param name - The name of the CUDA device.
|
| 88 |
+
*
|
| 89 |
+
* \version \NVTX_VERSION_1
|
| 90 |
+
* @{ */
|
| 91 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceA(CUdevice device, const char* name);
|
| 92 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceW(CUdevice device, const wchar_t* name);
|
| 93 |
+
/** @} */
|
| 94 |
+
|
| 95 |
+
/* ------------------------------------------------------------------------- */
|
| 96 |
+
/** \brief Annotates a CUDA context.
|
| 97 |
+
*
|
| 98 |
+
* Allows the user to associate a CUDA context with a user-provided name.
|
| 99 |
+
*
|
| 100 |
+
* \param context - The handle of the CUDA context to name.
|
| 101 |
+
* \param name - The name of the CUDA context.
|
| 102 |
+
*
|
| 103 |
+
* \par Example:
|
| 104 |
+
* \code
|
| 105 |
+
* CUresult status = cuCtxCreate( &cuContext, 0, cuDevice );
|
| 106 |
+
* if ( CUDA_SUCCESS != status )
|
| 107 |
+
* goto Error;
|
| 108 |
+
* nvtxNameCuContext(cuContext, "CTX_NAME");
|
| 109 |
+
* \endcode
|
| 110 |
+
*
|
| 111 |
+
* \version \NVTX_VERSION_1
|
| 112 |
+
* @{ */
|
| 113 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuContextA(CUcontext context, const char* name);
|
| 114 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuContextW(CUcontext context, const wchar_t* name);
|
| 115 |
+
/** @} */
|
| 116 |
+
|
| 117 |
+
/* ------------------------------------------------------------------------- */
|
| 118 |
+
/** \brief Annotates a CUDA stream.
|
| 119 |
+
*
|
| 120 |
+
* Allows the user to associate a CUDA stream with a user-provided name.
|
| 121 |
+
*
|
| 122 |
+
* \param stream - The handle of the CUDA stream to name.
|
| 123 |
+
* \param name - The name of the CUDA stream.
|
| 124 |
+
*
|
| 125 |
+
* \version \NVTX_VERSION_1
|
| 126 |
+
* @{ */
|
| 127 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamA(CUstream stream, const char* name);
|
| 128 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamW(CUstream stream, const wchar_t* name);
|
| 129 |
+
/** @} */
|
| 130 |
+
|
| 131 |
+
/* ------------------------------------------------------------------------- */
|
| 132 |
+
/** \brief Annotates a CUDA event.
|
| 133 |
+
*
|
| 134 |
+
* Allows the user to associate a CUDA event with a user-provided name.
|
| 135 |
+
*
|
| 136 |
+
* \param event - The handle of the CUDA event to name.
|
| 137 |
+
* \param name - The name of the CUDA event.
|
| 138 |
+
*
|
| 139 |
+
* \version \NVTX_VERSION_1
|
| 140 |
+
* @{ */
|
| 141 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuEventA(CUevent event, const char* name);
|
| 142 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuEventW(CUevent event, const wchar_t* name);
|
| 143 |
+
/** @} */
|
| 144 |
+
|
| 145 |
+
/** @} */ /* END RESOURCE_NAMING */
|
| 146 |
+
|
| 147 |
+
/* ========================================================================= */
|
| 148 |
+
#ifdef UNICODE
|
| 149 |
+
#define nvtxNameCuDevice nvtxNameCuDeviceW
|
| 150 |
+
#define nvtxNameCuContext nvtxNameCuContextW
|
| 151 |
+
#define nvtxNameCuStream nvtxNameCuStreamW
|
| 152 |
+
#define nvtxNameCuEvent nvtxNameCuEventW
|
| 153 |
+
#else
|
| 154 |
+
#define nvtxNameCuDevice nvtxNameCuDeviceA
|
| 155 |
+
#define nvtxNameCuContext nvtxNameCuContextA
|
| 156 |
+
#define nvtxNameCuStream nvtxNameCuStreamA
|
| 157 |
+
#define nvtxNameCuEvent nvtxNameCuEventA
|
| 158 |
+
#endif
|
| 159 |
+
|
| 160 |
+
#ifdef __cplusplus
|
| 161 |
+
}
|
| 162 |
+
#endif /* __cplusplus */
|
| 163 |
+
|
| 164 |
+
#ifndef NVTX_NO_IMPL
|
| 165 |
+
#define NVTX_IMPL_GUARD_CUDA /* Ensure other headers cannot included directly */
|
| 166 |
+
#include "nvtxDetail/nvtxImplCuda_v3.h"
|
| 167 |
+
#undef NVTX_IMPL_GUARD_CUDA
|
| 168 |
+
#endif /*NVTX_NO_IMPL*/
|
| 169 |
+
|
| 170 |
+
#endif /* NVTOOLSEXT_CUDA_V3 */
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtCudaRt.h
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO USER:
|
| 5 |
+
*
|
| 6 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 11 |
+
* of a form of NVIDIA software license agreement.
|
| 12 |
+
*
|
| 13 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 14 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 15 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 16 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 17 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 18 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 19 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 20 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 21 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 22 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 23 |
+
*
|
| 24 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 25 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 26 |
+
* "commercial computer software" and "commercial computer software
|
| 27 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 28 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 29 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 30 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 31 |
+
* source code with only those rights set forth herein.
|
| 32 |
+
*
|
| 33 |
+
* Any use of this source code in individual and commercial software must
|
| 34 |
+
* include, in the user documentation and internal comments to the code,
|
| 35 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 36 |
+
*/
|
| 37 |
+
|
| 38 |
+
#include "nvToolsExt.h"
|
| 39 |
+
|
| 40 |
+
#include "cuda.h"
|
| 41 |
+
#include "driver_types.h"
|
| 42 |
+
|
| 43 |
+
#ifndef NVTOOLSEXT_CUDART_V3
|
| 44 |
+
#define NVTOOLSEXT_CUDART_V3
|
| 45 |
+
|
| 46 |
+
#ifdef __cplusplus
|
| 47 |
+
extern "C" {
|
| 48 |
+
#endif /* __cplusplus */
|
| 49 |
+
|
| 50 |
+
/* ========================================================================= */
|
| 51 |
+
/** \name Functions for CUDA Resource Naming
|
| 52 |
+
*/
|
| 53 |
+
/** \addtogroup RESOURCE_NAMING
|
| 54 |
+
* \section RESOURCE_NAMING_CUDART CUDA Runtime Resource Naming
|
| 55 |
+
*
|
| 56 |
+
* This section covers the API functions that allow to annotate CUDA resources
|
| 57 |
+
* with user-provided names.
|
| 58 |
+
*
|
| 59 |
+
* @{
|
| 60 |
+
*/
|
| 61 |
+
|
| 62 |
+
/* ------------------------------------------------------------------------- */
|
| 63 |
+
/* \cond SHOW_HIDDEN
|
| 64 |
+
* \brief Used to build a non-colliding value for resource types separated class
|
| 65 |
+
* \version \NVTX_VERSION_2
|
| 66 |
+
*/
|
| 67 |
+
#define NVTX_RESOURCE_CLASS_CUDART 5
|
| 68 |
+
/** \endcond */
|
| 69 |
+
|
| 70 |
+
/* ------------------------------------------------------------------------- */
|
| 71 |
+
/** \brief Resource types for CUDART
|
| 72 |
+
*/
|
| 73 |
+
typedef enum nvtxResourceCUDARTType_t
|
| 74 |
+
{
|
| 75 |
+
NVTX_RESOURCE_TYPE_CUDART_DEVICE = NVTX_RESOURCE_MAKE_TYPE(CUDART, 0), /* int device */
|
| 76 |
+
NVTX_RESOURCE_TYPE_CUDART_STREAM = NVTX_RESOURCE_MAKE_TYPE(CUDART, 1), /* cudaStream_t */
|
| 77 |
+
NVTX_RESOURCE_TYPE_CUDART_EVENT = NVTX_RESOURCE_MAKE_TYPE(CUDART, 2), /* cudaEvent_t */
|
| 78 |
+
} nvtxResourceCUDARTType_t;
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
/* ------------------------------------------------------------------------- */
|
| 82 |
+
/** \brief Annotates a CUDA device.
|
| 83 |
+
*
|
| 84 |
+
* Allows the user to associate a CUDA device with a user-provided name.
|
| 85 |
+
*
|
| 86 |
+
* \param device - The id of the CUDA device to name.
|
| 87 |
+
* \param name - The name of the CUDA device.
|
| 88 |
+
*
|
| 89 |
+
* \version \NVTX_VERSION_1
|
| 90 |
+
* @{ */
|
| 91 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceA(int device, const char* name);
|
| 92 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceW(int device, const wchar_t* name);
|
| 93 |
+
/** @} */
|
| 94 |
+
|
| 95 |
+
/* ------------------------------------------------------------------------- */
|
| 96 |
+
/** \brief Annotates a CUDA stream.
|
| 97 |
+
*
|
| 98 |
+
* Allows the user to associate a CUDA stream with a user-provided name.
|
| 99 |
+
*
|
| 100 |
+
* \param stream - The handle of the CUDA stream to name.
|
| 101 |
+
* \param name - The name of the CUDA stream.
|
| 102 |
+
*
|
| 103 |
+
* \version \NVTX_VERSION_1
|
| 104 |
+
* @{ */
|
| 105 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamA(cudaStream_t stream, const char* name);
|
| 106 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamW(cudaStream_t stream, const wchar_t* name);
|
| 107 |
+
/** @} */
|
| 108 |
+
|
| 109 |
+
/* ------------------------------------------------------------------------- */
|
| 110 |
+
/** \brief Annotates a CUDA event.
|
| 111 |
+
*
|
| 112 |
+
* Allows the user to associate a CUDA event with a user-provided name.
|
| 113 |
+
*
|
| 114 |
+
* \param event - The handle of the CUDA event to name.
|
| 115 |
+
* \param name - The name of the CUDA event.
|
| 116 |
+
*
|
| 117 |
+
* \version \NVTX_VERSION_1
|
| 118 |
+
* @{ */
|
| 119 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventA(cudaEvent_t event, const char* name);
|
| 120 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventW(cudaEvent_t event, const wchar_t* name);
|
| 121 |
+
/** @} */
|
| 122 |
+
|
| 123 |
+
/** @} */ /* END RESOURCE_NAMING */
|
| 124 |
+
|
| 125 |
+
/* ========================================================================= */
|
| 126 |
+
#ifdef UNICODE
|
| 127 |
+
#define nvtxNameCudaDevice nvtxNameCudaDeviceW
|
| 128 |
+
#define nvtxNameCudaStream nvtxNameCudaStreamW
|
| 129 |
+
#define nvtxNameCudaEvent nvtxNameCudaEventW
|
| 130 |
+
#else
|
| 131 |
+
#define nvtxNameCudaDevice nvtxNameCudaDeviceA
|
| 132 |
+
#define nvtxNameCudaStream nvtxNameCudaStreamA
|
| 133 |
+
#define nvtxNameCudaEvent nvtxNameCudaEventA
|
| 134 |
+
#endif
|
| 135 |
+
|
| 136 |
+
#ifdef __cplusplus
|
| 137 |
+
}
|
| 138 |
+
#endif /* __cplusplus */
|
| 139 |
+
|
| 140 |
+
#ifndef NVTX_NO_IMPL
|
| 141 |
+
#define NVTX_IMPL_GUARD_CUDART /* Ensure other headers cannot included directly */
|
| 142 |
+
#include "nvtxDetail/nvtxImplCudaRt_v3.h"
|
| 143 |
+
#undef NVTX_IMPL_GUARD_CUDART
|
| 144 |
+
#endif /*NVTX_NO_IMPL*/
|
| 145 |
+
|
| 146 |
+
#endif /* NVTOOLSEXT_CUDART_V3 */
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtOpenCL.h
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO USER:
|
| 5 |
+
*
|
| 6 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 11 |
+
* of a form of NVIDIA software license agreement.
|
| 12 |
+
*
|
| 13 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 14 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 15 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 16 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 17 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 18 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 19 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 20 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 21 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 22 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 23 |
+
*
|
| 24 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 25 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 26 |
+
* "commercial computer software" and "commercial computer software
|
| 27 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 28 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 29 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 30 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 31 |
+
* source code with only those rights set forth herein.
|
| 32 |
+
*
|
| 33 |
+
* Any use of this source code in individual and commercial software must
|
| 34 |
+
* include, in the user documentation and internal comments to the code,
|
| 35 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 36 |
+
*/
|
| 37 |
+
|
| 38 |
+
#include "nvToolsExt.h"
|
| 39 |
+
|
| 40 |
+
#include <CL/cl.h>
|
| 41 |
+
|
| 42 |
+
#ifndef NVTOOLSEXT_OPENCL_V3
|
| 43 |
+
#define NVTOOLSEXT_OPENCL_V3
|
| 44 |
+
|
| 45 |
+
#ifdef __cplusplus
|
| 46 |
+
extern "C" {
|
| 47 |
+
#endif /* __cplusplus */
|
| 48 |
+
|
| 49 |
+
/* ========================================================================= */
|
| 50 |
+
/** \name Functions for OpenCL Resource Naming
|
| 51 |
+
*/
|
| 52 |
+
/** \addtogroup RESOURCE_NAMING
|
| 53 |
+
* \section RESOURCE_NAMING_OPENCL OpenCL Resource Naming
|
| 54 |
+
*
|
| 55 |
+
* This section covers the API functions that allow to annotate OpenCL resources
|
| 56 |
+
* with user-provided names.
|
| 57 |
+
*
|
| 58 |
+
* @{
|
| 59 |
+
*/
|
| 60 |
+
|
| 61 |
+
/* ------------------------------------------------------------------------- */
|
| 62 |
+
/* \cond SHOW_HIDDEN
|
| 63 |
+
* \brief Used to build a non-colliding value for resource types separated class
|
| 64 |
+
* \version \NVTX_VERSION_2
|
| 65 |
+
*/
|
| 66 |
+
#define NVTX_RESOURCE_CLASS_OPENCL 6
|
| 67 |
+
/** \endcond */
|
| 68 |
+
|
| 69 |
+
/* ------------------------------------------------------------------------- */
|
| 70 |
+
/** \brief Resource types for OpenCL
|
| 71 |
+
*/
|
| 72 |
+
typedef enum nvtxResourceOpenCLType_t
|
| 73 |
+
{
|
| 74 |
+
NVTX_RESOURCE_TYPE_OPENCL_DEVICE = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 1),
|
| 75 |
+
NVTX_RESOURCE_TYPE_OPENCL_CONTEXT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 2),
|
| 76 |
+
NVTX_RESOURCE_TYPE_OPENCL_COMMANDQUEUE = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 3),
|
| 77 |
+
NVTX_RESOURCE_TYPE_OPENCL_MEMOBJECT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 4),
|
| 78 |
+
NVTX_RESOURCE_TYPE_OPENCL_SAMPLER = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 5),
|
| 79 |
+
NVTX_RESOURCE_TYPE_OPENCL_PROGRAM = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 6),
|
| 80 |
+
NVTX_RESOURCE_TYPE_OPENCL_EVENT = NVTX_RESOURCE_MAKE_TYPE(OPENCL, 7),
|
| 81 |
+
} nvtxResourceOpenCLType_t;
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
/* ------------------------------------------------------------------------- */
|
| 85 |
+
/** \brief Annotates an OpenCL device.
|
| 86 |
+
*
|
| 87 |
+
* Allows to associate an OpenCL device with a user-provided name.
|
| 88 |
+
*
|
| 89 |
+
* \param device - The handle of the OpenCL device to name.
|
| 90 |
+
* \param name - The name of the OpenCL device.
|
| 91 |
+
*
|
| 92 |
+
* \version \NVTX_VERSION_1
|
| 93 |
+
* @{ */
|
| 94 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceA(cl_device_id device, const char* name);
|
| 95 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceW(cl_device_id device, const wchar_t* name);
|
| 96 |
+
/** @} */
|
| 97 |
+
|
| 98 |
+
/* ------------------------------------------------------------------------- */
|
| 99 |
+
/** \brief Annotates an OpenCL context.
|
| 100 |
+
*
|
| 101 |
+
* Allows to associate an OpenCL context with a user-provided name.
|
| 102 |
+
*
|
| 103 |
+
* \param context - The handle of the OpenCL context to name.
|
| 104 |
+
* \param name - The name of the OpenCL context.
|
| 105 |
+
*
|
| 106 |
+
* \version \NVTX_VERSION_1
|
| 107 |
+
* @{ */
|
| 108 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClContextA(cl_context context, const char* name);
|
| 109 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClContextW(cl_context context, const wchar_t* name);
|
| 110 |
+
/** @} */
|
| 111 |
+
|
| 112 |
+
/* ------------------------------------------------------------------------- */
|
| 113 |
+
/** \brief Annotates an OpenCL command queue.
|
| 114 |
+
*
|
| 115 |
+
* Allows to associate an OpenCL command queue with a user-provided name.
|
| 116 |
+
*
|
| 117 |
+
* \param command_queue - The handle of the OpenCL command queue to name.
|
| 118 |
+
* \param name - The name of the OpenCL command queue.
|
| 119 |
+
*
|
| 120 |
+
* \version \NVTX_VERSION_1
|
| 121 |
+
* @{ */
|
| 122 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueA(cl_command_queue command_queue, const char* name);
|
| 123 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueW(cl_command_queue command_queue, const wchar_t* name);
|
| 124 |
+
/** @} */
|
| 125 |
+
|
| 126 |
+
/* ------------------------------------------------------------------------- */
|
| 127 |
+
/** \brief Annotates an OpenCL memory object.
|
| 128 |
+
*
|
| 129 |
+
* Allows to associate an OpenCL memory object with a user-provided name.
|
| 130 |
+
*
|
| 131 |
+
* \param memobj - The handle of the OpenCL memory object to name.
|
| 132 |
+
* \param name - The name of the OpenCL memory object.
|
| 133 |
+
*
|
| 134 |
+
* \version \NVTX_VERSION_1
|
| 135 |
+
* @{ */
|
| 136 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectA(cl_mem memobj, const char* name);
|
| 137 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectW(cl_mem memobj, const wchar_t* name);
|
| 138 |
+
/** @} */
|
| 139 |
+
|
| 140 |
+
/* ------------------------------------------------------------------------- */
|
| 141 |
+
/** \brief Annotates an OpenCL sampler.
|
| 142 |
+
*
|
| 143 |
+
* Allows to associate an OpenCL sampler with a user-provided name.
|
| 144 |
+
*
|
| 145 |
+
* \param sampler - The handle of the OpenCL sampler to name.
|
| 146 |
+
* \param name - The name of the OpenCL sampler.
|
| 147 |
+
*
|
| 148 |
+
* \version \NVTX_VERSION_1
|
| 149 |
+
* @{ */
|
| 150 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerA(cl_sampler sampler, const char* name);
|
| 151 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerW(cl_sampler sampler, const wchar_t* name);
|
| 152 |
+
/** @} */
|
| 153 |
+
|
| 154 |
+
/* ------------------------------------------------------------------------- */
|
| 155 |
+
/** \brief Annotates an OpenCL program.
|
| 156 |
+
*
|
| 157 |
+
* Allows to associate an OpenCL program with a user-provided name.
|
| 158 |
+
*
|
| 159 |
+
* \param program - The handle of the OpenCL program to name.
|
| 160 |
+
* \param name - The name of the OpenCL program.
|
| 161 |
+
*
|
| 162 |
+
* \code
|
| 163 |
+
* cpProgram = clCreateProgramWithSource(cxGPUContext, 1,
|
| 164 |
+
* (const char **) &cSourceCL, &program_length, &ciErrNum);
|
| 165 |
+
* shrCheckErrorEX(ciErrNum, CL_SUCCESS, pCleanup);
|
| 166 |
+
* nvtxNameClProgram(cpProgram, L"PROGRAM_NAME");
|
| 167 |
+
* \endcode
|
| 168 |
+
*
|
| 169 |
+
* \version \NVTX_VERSION_1
|
| 170 |
+
* @{ */
|
| 171 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClProgramA(cl_program program, const char* name);
|
| 172 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClProgramW(cl_program program, const wchar_t* name);
|
| 173 |
+
/** @} */
|
| 174 |
+
|
| 175 |
+
/* ------------------------------------------------------------------------- */
|
| 176 |
+
/** \brief Annotates an OpenCL event.
|
| 177 |
+
*
|
| 178 |
+
* Allows to associate an OpenCL event with a user-provided name.
|
| 179 |
+
*
|
| 180 |
+
* \param evnt - The handle of the OpenCL event to name.
|
| 181 |
+
* \param name - The name of the OpenCL event.
|
| 182 |
+
*
|
| 183 |
+
* \version \NVTX_VERSION_1
|
| 184 |
+
* @{ */
|
| 185 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClEventA(cl_event evnt, const char* name);
|
| 186 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClEventW(cl_event evnt, const wchar_t* name);
|
| 187 |
+
/** @} */
|
| 188 |
+
|
| 189 |
+
/** @} */ /* END RESOURCE_NAMING */
|
| 190 |
+
|
| 191 |
+
/* ========================================================================= */
|
| 192 |
+
#ifdef UNICODE
|
| 193 |
+
#define nvtxNameClDevice nvtxNameClDeviceW
|
| 194 |
+
#define nvtxNameClContext nvtxNameClContextW
|
| 195 |
+
#define nvtxNameClCommandQueue nvtxNameClCommandQueueW
|
| 196 |
+
#define nvtxNameClMemObject nvtxNameClMemObjectW
|
| 197 |
+
#define nvtxNameClSampler nvtxNameClSamplerW
|
| 198 |
+
#define nvtxNameClProgram nvtxNameClProgramW
|
| 199 |
+
#define nvtxNameClEvent nvtxNameClEventW
|
| 200 |
+
#else
|
| 201 |
+
#define nvtxNameClDevice nvtxNameClDeviceA
|
| 202 |
+
#define nvtxNameClContext nvtxNameClContextA
|
| 203 |
+
#define nvtxNameClCommandQueue nvtxNameClCommandQueueA
|
| 204 |
+
#define nvtxNameClMemObject nvtxNameClMemObjectA
|
| 205 |
+
#define nvtxNameClSampler nvtxNameClSamplerA
|
| 206 |
+
#define nvtxNameClProgram nvtxNameClProgramA
|
| 207 |
+
#define nvtxNameClEvent nvtxNameClEventA
|
| 208 |
+
#endif
|
| 209 |
+
|
| 210 |
+
#ifdef __cplusplus
|
| 211 |
+
}
|
| 212 |
+
#endif /* __cplusplus */
|
| 213 |
+
|
| 214 |
+
#ifndef NVTX_NO_IMPL
|
| 215 |
+
#define NVTX_IMPL_GUARD_OPENCL /* Ensure other headers cannot included directly */
|
| 216 |
+
#include "nvtxDetail/nvtxImplOpenCL_v3.h"
|
| 217 |
+
#undef NVTX_IMPL_GUARD_OPENCL
|
| 218 |
+
#endif /*NVTX_NO_IMPL*/
|
| 219 |
+
|
| 220 |
+
#endif /* NVTOOLSEXT_OPENCL_V3 */
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvToolsExtSync.h
ADDED
|
@@ -0,0 +1,411 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO USER:
|
| 5 |
+
*
|
| 6 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 11 |
+
* of a form of NVIDIA software license agreement.
|
| 12 |
+
*
|
| 13 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 14 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 15 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 16 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 17 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 18 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 19 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 20 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 21 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 22 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 23 |
+
*
|
| 24 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 25 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 26 |
+
* "commercial computer software" and "commercial computer software
|
| 27 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 28 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 29 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 30 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 31 |
+
* source code with only those rights set forth herein.
|
| 32 |
+
*
|
| 33 |
+
* Any use of this source code in individual and commercial software must
|
| 34 |
+
* include, in the user documentation and internal comments to the code,
|
| 35 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 36 |
+
*/
|
| 37 |
+
|
| 38 |
+
#include "nvToolsExt.h"
|
| 39 |
+
|
| 40 |
+
#ifndef NVTOOLSEXT_SYNC_V3
|
| 41 |
+
#define NVTOOLSEXT_SYNC_V3
|
| 42 |
+
|
| 43 |
+
#ifdef __cplusplus
|
| 44 |
+
extern "C" {
|
| 45 |
+
#endif /* __cplusplus */
|
| 46 |
+
|
| 47 |
+
/* \cond SHOW_HIDDEN
|
| 48 |
+
* \version \NVTX_VERSION_2
|
| 49 |
+
*/
|
| 50 |
+
#define NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE ( (uint16_t)( sizeof(nvtxSyncUserAttributes_v0) ) )
|
| 51 |
+
/** \endcond */
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* \page PAGE_SYNCHRONIZATION Synchronization
|
| 56 |
+
*
|
| 57 |
+
* This section covers a subset of the API that allow users to track additional
|
| 58 |
+
* synchronization details of their application. Naming OS synchronization primitives
|
| 59 |
+
* may allow users to better understand the data collected by traced synchronization
|
| 60 |
+
* APIs. Additionally, a user defined synchronization object can allow the users to
|
| 61 |
+
* to tell the tools when the user is building their own synchronization system
|
| 62 |
+
* that do not rely on the OS to provide behaviors and instead use techniques like
|
| 63 |
+
* atomic operations and spinlocks.
|
| 64 |
+
*
|
| 65 |
+
* See module \ref SYNCHRONIZATION for details.
|
| 66 |
+
*
|
| 67 |
+
* \par Example:
|
| 68 |
+
* \code
|
| 69 |
+
* class MyMutex
|
| 70 |
+
* {
|
| 71 |
+
* volatile long bLocked;
|
| 72 |
+
* nvtxSyncUser_t hSync;
|
| 73 |
+
* public:
|
| 74 |
+
* MyMutex(const char* name, nvtxDomainHandle_t d){
|
| 75 |
+
* bLocked = 0;
|
| 76 |
+
*
|
| 77 |
+
* nvtxSyncUserAttributes_t attribs = { 0 };
|
| 78 |
+
* attribs.version = NVTX_VERSION;
|
| 79 |
+
* attribs.size = NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE;
|
| 80 |
+
* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 81 |
+
* attribs.message.ascii = name;
|
| 82 |
+
* hSync = nvtxDomainSyncUserCreate(d, &attribs);
|
| 83 |
+
* }
|
| 84 |
+
*
|
| 85 |
+
* ~MyMutex() {
|
| 86 |
+
* nvtxDomainSyncUserDestroy(hSync);
|
| 87 |
+
* }
|
| 88 |
+
*
|
| 89 |
+
* bool Lock() {
|
| 90 |
+
* nvtxDomainSyncUserAcquireStart(hSync);
|
| 91 |
+
* bool acquired = __sync_bool_compare_and_swap(&bLocked, 0, 1);//atomic compiler intrinsic
|
| 92 |
+
|
| 93 |
+
* if (acquired) {
|
| 94 |
+
* nvtxDomainSyncUserAcquireSuccess(hSync);
|
| 95 |
+
* }
|
| 96 |
+
* else {
|
| 97 |
+
* nvtxDomainSyncUserAcquireFailed(hSync);
|
| 98 |
+
* }
|
| 99 |
+
* return acquired;
|
| 100 |
+
* }
|
| 101 |
+
|
| 102 |
+
* void Unlock() {
|
| 103 |
+
* nvtxDomainSyncUserReleasing(hSync);
|
| 104 |
+
* bLocked = false;
|
| 105 |
+
* }
|
| 106 |
+
* };
|
| 107 |
+
* \endcode
|
| 108 |
+
*
|
| 109 |
+
* \version \NVTX_VERSION_2
|
| 110 |
+
*/
|
| 111 |
+
|
| 112 |
+
/* ------------------------------------------------------------------------- */
|
| 113 |
+
/* \cond SHOW_HIDDEN
|
| 114 |
+
* \brief Used to build a non-colliding value for resource types separated class
|
| 115 |
+
* \version \NVTX_VERSION_2
|
| 116 |
+
*/
|
| 117 |
+
#define NVTX_RESOURCE_CLASS_SYNC_OS 2 /**< Synchronization objects that are OS specific. */
|
| 118 |
+
#define NVTX_RESOURCE_CLASS_SYNC_PTHREAD 3 /**< Synchronization objects that are from the POSIX Threads API (pthread)*/
|
| 119 |
+
/** \endcond */
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
/* ------------------------------------------------------------------------- */
|
| 123 |
+
/** \defgroup SYNCHRONIZATION Synchronization
|
| 124 |
+
* See page \ref PAGE_SYNCHRONIZATION.
|
| 125 |
+
* @{
|
| 126 |
+
*/
|
| 127 |
+
|
| 128 |
+
/** \brief Resource type values for OSs with POSIX Thread API support
|
| 129 |
+
*/
|
| 130 |
+
typedef enum nvtxResourceSyncPosixThreadType_t
|
| 131 |
+
{
|
| 132 |
+
NVTX_RESOURCE_TYPE_SYNC_PTHREAD_MUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 1), /* pthread_mutex_t */
|
| 133 |
+
NVTX_RESOURCE_TYPE_SYNC_PTHREAD_CONDITION = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 2), /* pthread_cond_t */
|
| 134 |
+
NVTX_RESOURCE_TYPE_SYNC_PTHREAD_RWLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 3), /* pthread_rwlock_t */
|
| 135 |
+
NVTX_RESOURCE_TYPE_SYNC_PTHREAD_BARRIER = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 4), /* pthread_barrier_t */
|
| 136 |
+
NVTX_RESOURCE_TYPE_SYNC_PTHREAD_SPINLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 5), /* pthread_spinlock_t */
|
| 137 |
+
NVTX_RESOURCE_TYPE_SYNC_PTHREAD_ONCE = NVTX_RESOURCE_MAKE_TYPE(SYNC_PTHREAD, 6) /* pthread_once_t */
|
| 138 |
+
} nvtxResourceSyncPosixThreadType_t;
|
| 139 |
+
|
| 140 |
+
/** \brief Resource type values for Windows OSs
|
| 141 |
+
*/
|
| 142 |
+
typedef enum nvtxResourceSyncWindowsType_t
|
| 143 |
+
{
|
| 144 |
+
NVTX_RESOURCE_TYPE_SYNC_WINDOWS_MUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 1),
|
| 145 |
+
NVTX_RESOURCE_TYPE_SYNC_WINDOWS_SEMAPHORE = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 2),
|
| 146 |
+
NVTX_RESOURCE_TYPE_SYNC_WINDOWS_EVENT = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 3),
|
| 147 |
+
NVTX_RESOURCE_TYPE_SYNC_WINDOWS_CRITICAL_SECTION = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 4),
|
| 148 |
+
NVTX_RESOURCE_TYPE_SYNC_WINDOWS_SRWLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 5)
|
| 149 |
+
} nvtxResourceSyncWindowsType_t;
|
| 150 |
+
|
| 151 |
+
/** \brief Resource type values for Linux and Linux derived OSs such as Android
|
| 152 |
+
* \sa
|
| 153 |
+
* ::nvtxResourceSyncPosixThreadType_t
|
| 154 |
+
*/
|
| 155 |
+
typedef enum nvtxResourceSyncLinuxType_t
|
| 156 |
+
{
|
| 157 |
+
NVTX_RESOURCE_TYPE_SYNC_LINUX_MUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 1),
|
| 158 |
+
NVTX_RESOURCE_TYPE_SYNC_LINUX_FUTEX = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 2),
|
| 159 |
+
NVTX_RESOURCE_TYPE_SYNC_LINUX_SEMAPHORE = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 3),
|
| 160 |
+
NVTX_RESOURCE_TYPE_SYNC_LINUX_COMPLETION = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 4),
|
| 161 |
+
NVTX_RESOURCE_TYPE_SYNC_LINUX_SPINLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 5),
|
| 162 |
+
NVTX_RESOURCE_TYPE_SYNC_LINUX_SEQLOCK = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 6),
|
| 163 |
+
NVTX_RESOURCE_TYPE_SYNC_LINUX_RCU = NVTX_RESOURCE_MAKE_TYPE(SYNC_OS, 7)
|
| 164 |
+
} nvtxResourceSyncLinuxType_t;
|
| 165 |
+
|
| 166 |
+
/** \brief Resource type values for Android come from Linux.
|
| 167 |
+
* \sa
|
| 168 |
+
* ::nvtxResourceSyncLinuxType_t
|
| 169 |
+
* ::nvtxResourceSyncPosixThreadType_t
|
| 170 |
+
*/
|
| 171 |
+
typedef enum nvtxResourceSyncLinuxType_t nvtxResourceSyncAndroidType_t;
|
| 172 |
+
|
| 173 |
+
/** \brief User Defined Synchronization Object Handle .
|
| 174 |
+
* \anchor SYNCUSER_HANDLE_STRUCTURE
|
| 175 |
+
*
|
| 176 |
+
* This structure is opaque to the user and is used as a handle to reference
|
| 177 |
+
* a user defined syncrhonization object. The tools will return a pointer through the API for the application
|
| 178 |
+
* to hold on it's behalf to reference the string in the future.
|
| 179 |
+
*
|
| 180 |
+
*/
|
| 181 |
+
typedef struct nvtxSyncUser* nvtxSyncUser_t;
|
| 182 |
+
|
| 183 |
+
/** \brief User Defined Synchronization Object Attributes Structure.
|
| 184 |
+
* \anchor USERDEF_SYNC_ATTRIBUTES_STRUCTURE
|
| 185 |
+
*
|
| 186 |
+
* This structure is used to describe the attributes of a user defined synchronization
|
| 187 |
+
* object. The layout of the structure is defined by a specific version of the tools
|
| 188 |
+
* extension library and can change between different versions of the Tools Extension
|
| 189 |
+
* library.
|
| 190 |
+
*
|
| 191 |
+
* \par Initializing the Attributes
|
| 192 |
+
*
|
| 193 |
+
* The caller should always perform the following three tasks when using
|
| 194 |
+
* attributes:
|
| 195 |
+
* <ul>
|
| 196 |
+
* <li>Zero the structure
|
| 197 |
+
* <li>Set the version field
|
| 198 |
+
* <li>Set the size field
|
| 199 |
+
* </ul>
|
| 200 |
+
*
|
| 201 |
+
* Zeroing the structure sets all the event attributes types and values
|
| 202 |
+
* to the default value.
|
| 203 |
+
*
|
| 204 |
+
* The version and size field are used by the Tools Extension
|
| 205 |
+
* implementation to handle multiple versions of the attributes structure.
|
| 206 |
+
*
|
| 207 |
+
* It is recommended that the caller use one of the following to methods
|
| 208 |
+
* to initialize the event attributes structure:
|
| 209 |
+
*
|
| 210 |
+
* \par Method 1: Initializing nvtxEventAttributes for future compatibility
|
| 211 |
+
* \code
|
| 212 |
+
* nvtxSyncUserAttributes_t attribs = {0};
|
| 213 |
+
* attribs.version = NVTX_VERSION;
|
| 214 |
+
* attribs.size = NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE;
|
| 215 |
+
* \endcode
|
| 216 |
+
*
|
| 217 |
+
* \par Method 2: Initializing nvtxSyncUserAttributes_t for a specific version
|
| 218 |
+
* \code
|
| 219 |
+
* nvtxSyncUserAttributes_t attribs = {0};
|
| 220 |
+
* attribs.version = 1;
|
| 221 |
+
* attribs.size = (uint16_t)(sizeof(nvtxSyncUserAttributes_t));
|
| 222 |
+
* \endcode
|
| 223 |
+
*
|
| 224 |
+
* If the caller uses Method 1 it is critical that the entire binary
|
| 225 |
+
* layout of the structure be configured to 0 so that all fields
|
| 226 |
+
* are initialized to the default value.
|
| 227 |
+
*
|
| 228 |
+
* The caller should either use both NVTX_VERSION and
|
| 229 |
+
* NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE (Method 1) or use explicit values
|
| 230 |
+
* and a versioned type (Method 2). Using a mix of the two methods
|
| 231 |
+
* will likely cause either source level incompatibility or binary
|
| 232 |
+
* incompatibility in the future.
|
| 233 |
+
*
|
| 234 |
+
* \par Settings Attribute Types and Values
|
| 235 |
+
*
|
| 236 |
+
*
|
| 237 |
+
* \par Example:
|
| 238 |
+
* \code
|
| 239 |
+
* // Initialize
|
| 240 |
+
* nvtxSyncUserAttributes_t attribs = {0};
|
| 241 |
+
* attribs.version = NVTX_VERSION;
|
| 242 |
+
* attribs.size = NVTX_SYNCUSER_ATTRIB_STRUCT_SIZE;
|
| 243 |
+
*
|
| 244 |
+
* // Configure the Attributes
|
| 245 |
+
* attribs.messageType = NVTX_MESSAGE_TYPE_ASCII;
|
| 246 |
+
* attribs.message.ascii = "Example";
|
| 247 |
+
* \endcode
|
| 248 |
+
*
|
| 249 |
+
* \sa
|
| 250 |
+
* ::nvtxDomainSyncUserCreate
|
| 251 |
+
*/
|
| 252 |
+
typedef struct nvtxSyncUserAttributes_v0
|
| 253 |
+
{
|
| 254 |
+
/**
|
| 255 |
+
* \brief Version flag of the structure.
|
| 256 |
+
*
|
| 257 |
+
* Needs to be set to NVTX_VERSION to indicate the version of NVTX APIs
|
| 258 |
+
* supported in this header file. This can optionally be overridden to
|
| 259 |
+
* another version of the tools extension library.
|
| 260 |
+
*/
|
| 261 |
+
uint16_t version;
|
| 262 |
+
|
| 263 |
+
/**
|
| 264 |
+
* \brief Size of the structure.
|
| 265 |
+
*
|
| 266 |
+
* Needs to be set to the size in bytes of the event attribute
|
| 267 |
+
* structure used to specify the event.
|
| 268 |
+
*/
|
| 269 |
+
uint16_t size;
|
| 270 |
+
|
| 271 |
+
/** \brief Message type specified in this attribute structure.
|
| 272 |
+
*
|
| 273 |
+
* Defines the message format of the attribute structure's \ref nvtxSyncUserAttributes_v0::message
|
| 274 |
+
* "message" field.
|
| 275 |
+
*
|
| 276 |
+
* Default Value is NVTX_MESSAGE_UNKNOWN
|
| 277 |
+
*/
|
| 278 |
+
int32_t messageType; /* nvtxMessageType_t */
|
| 279 |
+
|
| 280 |
+
/** \brief Message assigned to this attribute structure.
|
| 281 |
+
*
|
| 282 |
+
* The text message that is attached to an event.
|
| 283 |
+
*/
|
| 284 |
+
nvtxMessageValue_t message;
|
| 285 |
+
|
| 286 |
+
} nvtxSyncUserAttributes_v0;
|
| 287 |
+
|
| 288 |
+
typedef struct nvtxSyncUserAttributes_v0 nvtxSyncUserAttributes_t;
|
| 289 |
+
|
| 290 |
+
/* ------------------------------------------------------------------------- */
|
| 291 |
+
/** \brief Create a user defined synchronization object
|
| 292 |
+
* This is used to track non-OS synchronization working with spinlocks and atomics
|
| 293 |
+
*
|
| 294 |
+
* \param domain - Domain to own the resource
|
| 295 |
+
* \param attribs - A structure to assign multiple attributes to the object.
|
| 296 |
+
*
|
| 297 |
+
* \return A handle that represents the newly created user defined synchronization object.
|
| 298 |
+
*
|
| 299 |
+
* \sa
|
| 300 |
+
* ::nvtxDomainSyncUserCreate
|
| 301 |
+
* ::nvtxDomainSyncUserDestroy
|
| 302 |
+
* ::nvtxDomainSyncUserAcquireStart
|
| 303 |
+
* ::nvtxDomainSyncUserAcquireFailed
|
| 304 |
+
* ::nvtxDomainSyncUserAcquireSuccess
|
| 305 |
+
* ::nvtxDomainSyncUserReleasing
|
| 306 |
+
*
|
| 307 |
+
* \version \NVTX_VERSION_2
|
| 308 |
+
*/
|
| 309 |
+
NVTX_DECLSPEC nvtxSyncUser_t NVTX_API nvtxDomainSyncUserCreate(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs);
|
| 310 |
+
|
| 311 |
+
/* ------------------------------------------------------------------------- */
|
| 312 |
+
/** \brief Destroy a user defined synchronization object
|
| 313 |
+
* This is used to track non-OS synchronization working with spinlocks and atomics
|
| 314 |
+
*
|
| 315 |
+
* \param handle - A handle to the object to operate on.
|
| 316 |
+
*
|
| 317 |
+
* \sa
|
| 318 |
+
* ::nvtxDomainSyncUserCreate
|
| 319 |
+
* ::nvtxDomainSyncUserDestroy
|
| 320 |
+
* ::nvtxDomainSyncUserAcquireStart
|
| 321 |
+
* ::nvtxDomainSyncUserAcquireFailed
|
| 322 |
+
* ::nvtxDomainSyncUserAcquireSuccess
|
| 323 |
+
* ::nvtxDomainSyncUserReleasing
|
| 324 |
+
*
|
| 325 |
+
* \version \NVTX_VERSION_2
|
| 326 |
+
*/
|
| 327 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserDestroy(nvtxSyncUser_t handle);
|
| 328 |
+
|
| 329 |
+
/* ------------------------------------------------------------------------- */
|
| 330 |
+
/** \brief Signal to tools that an attempt to acquire a user defined synchronization object
|
| 331 |
+
*
|
| 332 |
+
* \param handle - A handle to the object to operate on.
|
| 333 |
+
*
|
| 334 |
+
* \sa
|
| 335 |
+
* ::nvtxDomainSyncUserCreate
|
| 336 |
+
* ::nvtxDomainSyncUserDestroy
|
| 337 |
+
* ::nvtxDomainSyncUserAcquireStart
|
| 338 |
+
* ::nvtxDomainSyncUserAcquireFailed
|
| 339 |
+
* ::nvtxDomainSyncUserAcquireSuccess
|
| 340 |
+
* ::nvtxDomainSyncUserReleasing
|
| 341 |
+
*
|
| 342 |
+
* \version \NVTX_VERSION_2
|
| 343 |
+
*/
|
| 344 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireStart(nvtxSyncUser_t handle);
|
| 345 |
+
|
| 346 |
+
/* ------------------------------------------------------------------------- */
|
| 347 |
+
/** \brief Signal to tools of failure in acquiring a user defined synchronization object
|
| 348 |
+
* This should be called after \ref nvtxDomainSyncUserAcquireStart
|
| 349 |
+
*
|
| 350 |
+
* \param handle - A handle to the object to operate on.
|
| 351 |
+
*
|
| 352 |
+
* \sa
|
| 353 |
+
* ::nvtxDomainSyncUserCreate
|
| 354 |
+
* ::nvtxDomainSyncUserDestroy
|
| 355 |
+
* ::nvtxDomainSyncUserAcquireStart
|
| 356 |
+
* ::nvtxDomainSyncUserAcquireFailed
|
| 357 |
+
* ::nvtxDomainSyncUserAcquireSuccess
|
| 358 |
+
* ::nvtxDomainSyncUserReleasing
|
| 359 |
+
*
|
| 360 |
+
* \version \NVTX_VERSION_2
|
| 361 |
+
*/NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireFailed(nvtxSyncUser_t handle);
|
| 362 |
+
|
| 363 |
+
/* ------------------------------------------------------------------------- */
|
| 364 |
+
/** \brief Signal to tools of success in acquiring a user defined synchronization object
|
| 365 |
+
* This should be called after \ref nvtxDomainSyncUserAcquireStart.
|
| 366 |
+
*
|
| 367 |
+
* \param handle - A handle to the object to operate on.
|
| 368 |
+
*
|
| 369 |
+
* \sa
|
| 370 |
+
* ::nvtxDomainSyncUserCreate
|
| 371 |
+
* ::nvtxDomainSyncUserDestroy
|
| 372 |
+
* ::nvtxDomainSyncUserAcquireStart
|
| 373 |
+
* ::nvtxDomainSyncUserAcquireFailed
|
| 374 |
+
* ::nvtxDomainSyncUserAcquireSuccess
|
| 375 |
+
* ::nvtxDomainSyncUserReleasing
|
| 376 |
+
*
|
| 377 |
+
* \version \NVTX_VERSION_2
|
| 378 |
+
*/NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireSuccess(nvtxSyncUser_t handle);
|
| 379 |
+
|
| 380 |
+
/* ------------------------------------------------------------------------- */
|
| 381 |
+
/** \brief Signal to tools of releasing a reservation on user defined synchronization object
|
| 382 |
+
* This should be called after \ref nvtxDomainSyncUserAcquireSuccess.
|
| 383 |
+
*
|
| 384 |
+
* \param handle - A handle to the object to operate on.
|
| 385 |
+
*
|
| 386 |
+
* \sa
|
| 387 |
+
* ::nvtxDomainSyncUserCreate
|
| 388 |
+
* ::nvtxDomainSyncUserDestroy
|
| 389 |
+
* ::nvtxDomainSyncUserAcquireStart
|
| 390 |
+
* ::nvtxDomainSyncUserAcquireFailed
|
| 391 |
+
* ::nvtxDomainSyncUserAcquireSuccess
|
| 392 |
+
* ::nvtxDomainSyncUserReleasing
|
| 393 |
+
*
|
| 394 |
+
* \version \NVTX_VERSION_2
|
| 395 |
+
*/
|
| 396 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserReleasing(nvtxSyncUser_t handle);
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
/** @} */ /*END defgroup*/
|
| 400 |
+
|
| 401 |
+
#ifdef __cplusplus
|
| 402 |
+
}
|
| 403 |
+
#endif /* __cplusplus */
|
| 404 |
+
|
| 405 |
+
#ifndef NVTX_NO_IMPL
|
| 406 |
+
#define NVTX_IMPL_GUARD_SYNC /* Ensure other headers cannot included directly */
|
| 407 |
+
#include "nvtxDetail/nvtxImplSync_v3.h"
|
| 408 |
+
#undef NVTX_IMPL_GUARD_SYNC
|
| 409 |
+
#endif /*NVTX_NO_IMPL*/
|
| 410 |
+
|
| 411 |
+
#endif /* NVTOOLSEXT_SYNC_V3 */
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImpl.h
ADDED
|
@@ -0,0 +1,469 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* This file was procedurally generated! Do not modify this file by hand. */
|
| 2 |
+
|
| 3 |
+
/*
|
| 4 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* NOTICE TO USER:
|
| 7 |
+
*
|
| 8 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 9 |
+
* international Copyright laws.
|
| 10 |
+
*
|
| 11 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 12 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 13 |
+
* of a form of NVIDIA software license agreement.
|
| 14 |
+
*
|
| 15 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 16 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 17 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 18 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 19 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 20 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 21 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 22 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 23 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 24 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 25 |
+
*
|
| 26 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 27 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 28 |
+
* "commercial computer software" and "commercial computer software
|
| 29 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 30 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 31 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 32 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 33 |
+
* source code with only those rights set forth herein.
|
| 34 |
+
*
|
| 35 |
+
* Any use of this source code in individual and commercial software must
|
| 36 |
+
* include, in the user documentation and internal comments to the code,
|
| 37 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 38 |
+
*/
|
| 39 |
+
|
| 40 |
+
#ifndef NVTX_IMPL_GUARD
|
| 41 |
+
#error Never include this file directly -- it is automatically included by nvToolsExt.h (except when NVTX_NO_IMPL is defined).
|
| 42 |
+
#endif
|
| 43 |
+
|
| 44 |
+
/* ---- Include required platform headers ---- */
|
| 45 |
+
|
| 46 |
+
#if defined(_WIN32)
|
| 47 |
+
|
| 48 |
+
#include <Windows.h>
|
| 49 |
+
|
| 50 |
+
#else
|
| 51 |
+
#include <unistd.h>
|
| 52 |
+
|
| 53 |
+
#if defined(__ANDROID__)
|
| 54 |
+
#include <android/api-level.h>
|
| 55 |
+
#endif
|
| 56 |
+
|
| 57 |
+
#if defined(__linux__) || defined(__CYGWIN__)
|
| 58 |
+
#include <sched.h>
|
| 59 |
+
#endif
|
| 60 |
+
|
| 61 |
+
#include <limits.h>
|
| 62 |
+
#include <dlfcn.h>
|
| 63 |
+
#include <fcntl.h>
|
| 64 |
+
#include <stdlib.h>
|
| 65 |
+
#include <stdio.h>
|
| 66 |
+
#include <sys/types.h>
|
| 67 |
+
#include <unistd.h>
|
| 68 |
+
#include <errno.h>
|
| 69 |
+
|
| 70 |
+
#include <string.h>
|
| 71 |
+
#include <sys/types.h>
|
| 72 |
+
#include <pthread.h>
|
| 73 |
+
#include <stdlib.h>
|
| 74 |
+
#include <wchar.h>
|
| 75 |
+
|
| 76 |
+
#endif
|
| 77 |
+
|
| 78 |
+
/* ---- Define macros used in this file ---- */
|
| 79 |
+
|
| 80 |
+
#define NVTX_INIT_STATE_FRESH 0
|
| 81 |
+
#define NVTX_INIT_STATE_STARTED 1
|
| 82 |
+
#define NVTX_INIT_STATE_COMPLETE 2
|
| 83 |
+
|
| 84 |
+
#ifdef NVTX_DEBUG_PRINT
|
| 85 |
+
#ifdef __ANDROID__
|
| 86 |
+
#include <android/log.h>
|
| 87 |
+
#define NVTX_ERR(...) __android_log_print(ANDROID_LOG_ERROR, "NVTOOLSEXT", __VA_ARGS__);
|
| 88 |
+
#define NVTX_INFO(...) __android_log_print(ANDROID_LOG_INFO, "NVTOOLSEXT", __VA_ARGS__);
|
| 89 |
+
#else
|
| 90 |
+
#include <stdio.h>
|
| 91 |
+
#define NVTX_ERR(...) fprintf(stderr, "NVTX_ERROR: " __VA_ARGS__)
|
| 92 |
+
#define NVTX_INFO(...) fprintf(stderr, "NVTX_INFO: " __VA_ARGS__)
|
| 93 |
+
#endif
|
| 94 |
+
#else /* !defined(NVTX_DEBUG_PRINT) */
|
| 95 |
+
#define NVTX_ERR(...)
|
| 96 |
+
#define NVTX_INFO(...)
|
| 97 |
+
#endif
|
| 98 |
+
|
| 99 |
+
#ifdef __cplusplus
|
| 100 |
+
extern "C" {
|
| 101 |
+
#endif /* __cplusplus */
|
| 102 |
+
|
| 103 |
+
#ifdef __GNUC__
|
| 104 |
+
#pragma GCC visibility push(hidden)
|
| 105 |
+
#endif
|
| 106 |
+
|
| 107 |
+
/* ---- Forward declare all functions referenced in globals ---- */
|
| 108 |
+
|
| 109 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(void);
|
| 110 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxEtiGetModuleFunctionTable)(
|
| 111 |
+
NvtxCallbackModule module,
|
| 112 |
+
NvtxFunctionTable* out_table,
|
| 113 |
+
unsigned int* out_size);
|
| 114 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxEtiSetInjectionNvtxVersion)(
|
| 115 |
+
uint32_t version);
|
| 116 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION const void* NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxGetExportTable)(
|
| 117 |
+
uint32_t exportTableId);
|
| 118 |
+
|
| 119 |
+
#include "nvtxInitDecls.h"
|
| 120 |
+
|
| 121 |
+
/* ---- Define all globals ---- */
|
| 122 |
+
|
| 123 |
+
typedef struct nvtxGlobals_t
|
| 124 |
+
{
|
| 125 |
+
volatile unsigned int initState;
|
| 126 |
+
NvtxExportTableCallbacks etblCallbacks;
|
| 127 |
+
NvtxExportTableVersionInfo etblVersionInfo;
|
| 128 |
+
|
| 129 |
+
/* Implementation function pointers */
|
| 130 |
+
nvtxMarkEx_impl_fntype nvtxMarkEx_impl_fnptr;
|
| 131 |
+
nvtxMarkA_impl_fntype nvtxMarkA_impl_fnptr;
|
| 132 |
+
nvtxMarkW_impl_fntype nvtxMarkW_impl_fnptr;
|
| 133 |
+
nvtxRangeStartEx_impl_fntype nvtxRangeStartEx_impl_fnptr;
|
| 134 |
+
nvtxRangeStartA_impl_fntype nvtxRangeStartA_impl_fnptr;
|
| 135 |
+
nvtxRangeStartW_impl_fntype nvtxRangeStartW_impl_fnptr;
|
| 136 |
+
nvtxRangeEnd_impl_fntype nvtxRangeEnd_impl_fnptr;
|
| 137 |
+
nvtxRangePushEx_impl_fntype nvtxRangePushEx_impl_fnptr;
|
| 138 |
+
nvtxRangePushA_impl_fntype nvtxRangePushA_impl_fnptr;
|
| 139 |
+
nvtxRangePushW_impl_fntype nvtxRangePushW_impl_fnptr;
|
| 140 |
+
nvtxRangePop_impl_fntype nvtxRangePop_impl_fnptr;
|
| 141 |
+
nvtxNameCategoryA_impl_fntype nvtxNameCategoryA_impl_fnptr;
|
| 142 |
+
nvtxNameCategoryW_impl_fntype nvtxNameCategoryW_impl_fnptr;
|
| 143 |
+
nvtxNameOsThreadA_impl_fntype nvtxNameOsThreadA_impl_fnptr;
|
| 144 |
+
nvtxNameOsThreadW_impl_fntype nvtxNameOsThreadW_impl_fnptr;
|
| 145 |
+
|
| 146 |
+
nvtxNameCuDeviceA_fakeimpl_fntype nvtxNameCuDeviceA_impl_fnptr;
|
| 147 |
+
nvtxNameCuDeviceW_fakeimpl_fntype nvtxNameCuDeviceW_impl_fnptr;
|
| 148 |
+
nvtxNameCuContextA_fakeimpl_fntype nvtxNameCuContextA_impl_fnptr;
|
| 149 |
+
nvtxNameCuContextW_fakeimpl_fntype nvtxNameCuContextW_impl_fnptr;
|
| 150 |
+
nvtxNameCuStreamA_fakeimpl_fntype nvtxNameCuStreamA_impl_fnptr;
|
| 151 |
+
nvtxNameCuStreamW_fakeimpl_fntype nvtxNameCuStreamW_impl_fnptr;
|
| 152 |
+
nvtxNameCuEventA_fakeimpl_fntype nvtxNameCuEventA_impl_fnptr;
|
| 153 |
+
nvtxNameCuEventW_fakeimpl_fntype nvtxNameCuEventW_impl_fnptr;
|
| 154 |
+
|
| 155 |
+
nvtxNameClDeviceA_fakeimpl_fntype nvtxNameClDeviceA_impl_fnptr;
|
| 156 |
+
nvtxNameClDeviceW_fakeimpl_fntype nvtxNameClDeviceW_impl_fnptr;
|
| 157 |
+
nvtxNameClContextA_fakeimpl_fntype nvtxNameClContextA_impl_fnptr;
|
| 158 |
+
nvtxNameClContextW_fakeimpl_fntype nvtxNameClContextW_impl_fnptr;
|
| 159 |
+
nvtxNameClCommandQueueA_fakeimpl_fntype nvtxNameClCommandQueueA_impl_fnptr;
|
| 160 |
+
nvtxNameClCommandQueueW_fakeimpl_fntype nvtxNameClCommandQueueW_impl_fnptr;
|
| 161 |
+
nvtxNameClMemObjectA_fakeimpl_fntype nvtxNameClMemObjectA_impl_fnptr;
|
| 162 |
+
nvtxNameClMemObjectW_fakeimpl_fntype nvtxNameClMemObjectW_impl_fnptr;
|
| 163 |
+
nvtxNameClSamplerA_fakeimpl_fntype nvtxNameClSamplerA_impl_fnptr;
|
| 164 |
+
nvtxNameClSamplerW_fakeimpl_fntype nvtxNameClSamplerW_impl_fnptr;
|
| 165 |
+
nvtxNameClProgramA_fakeimpl_fntype nvtxNameClProgramA_impl_fnptr;
|
| 166 |
+
nvtxNameClProgramW_fakeimpl_fntype nvtxNameClProgramW_impl_fnptr;
|
| 167 |
+
nvtxNameClEventA_fakeimpl_fntype nvtxNameClEventA_impl_fnptr;
|
| 168 |
+
nvtxNameClEventW_fakeimpl_fntype nvtxNameClEventW_impl_fnptr;
|
| 169 |
+
|
| 170 |
+
nvtxNameCudaDeviceA_impl_fntype nvtxNameCudaDeviceA_impl_fnptr;
|
| 171 |
+
nvtxNameCudaDeviceW_impl_fntype nvtxNameCudaDeviceW_impl_fnptr;
|
| 172 |
+
nvtxNameCudaStreamA_fakeimpl_fntype nvtxNameCudaStreamA_impl_fnptr;
|
| 173 |
+
nvtxNameCudaStreamW_fakeimpl_fntype nvtxNameCudaStreamW_impl_fnptr;
|
| 174 |
+
nvtxNameCudaEventA_fakeimpl_fntype nvtxNameCudaEventA_impl_fnptr;
|
| 175 |
+
nvtxNameCudaEventW_fakeimpl_fntype nvtxNameCudaEventW_impl_fnptr;
|
| 176 |
+
|
| 177 |
+
nvtxDomainMarkEx_impl_fntype nvtxDomainMarkEx_impl_fnptr;
|
| 178 |
+
nvtxDomainRangeStartEx_impl_fntype nvtxDomainRangeStartEx_impl_fnptr;
|
| 179 |
+
nvtxDomainRangeEnd_impl_fntype nvtxDomainRangeEnd_impl_fnptr;
|
| 180 |
+
nvtxDomainRangePushEx_impl_fntype nvtxDomainRangePushEx_impl_fnptr;
|
| 181 |
+
nvtxDomainRangePop_impl_fntype nvtxDomainRangePop_impl_fnptr;
|
| 182 |
+
nvtxDomainResourceCreate_impl_fntype nvtxDomainResourceCreate_impl_fnptr;
|
| 183 |
+
nvtxDomainResourceDestroy_impl_fntype nvtxDomainResourceDestroy_impl_fnptr;
|
| 184 |
+
nvtxDomainNameCategoryA_impl_fntype nvtxDomainNameCategoryA_impl_fnptr;
|
| 185 |
+
nvtxDomainNameCategoryW_impl_fntype nvtxDomainNameCategoryW_impl_fnptr;
|
| 186 |
+
nvtxDomainRegisterStringA_impl_fntype nvtxDomainRegisterStringA_impl_fnptr;
|
| 187 |
+
nvtxDomainRegisterStringW_impl_fntype nvtxDomainRegisterStringW_impl_fnptr;
|
| 188 |
+
nvtxDomainCreateA_impl_fntype nvtxDomainCreateA_impl_fnptr;
|
| 189 |
+
nvtxDomainCreateW_impl_fntype nvtxDomainCreateW_impl_fnptr;
|
| 190 |
+
nvtxDomainDestroy_impl_fntype nvtxDomainDestroy_impl_fnptr;
|
| 191 |
+
nvtxInitialize_impl_fntype nvtxInitialize_impl_fnptr;
|
| 192 |
+
|
| 193 |
+
nvtxDomainSyncUserCreate_impl_fntype nvtxDomainSyncUserCreate_impl_fnptr;
|
| 194 |
+
nvtxDomainSyncUserDestroy_impl_fntype nvtxDomainSyncUserDestroy_impl_fnptr;
|
| 195 |
+
nvtxDomainSyncUserAcquireStart_impl_fntype nvtxDomainSyncUserAcquireStart_impl_fnptr;
|
| 196 |
+
nvtxDomainSyncUserAcquireFailed_impl_fntype nvtxDomainSyncUserAcquireFailed_impl_fnptr;
|
| 197 |
+
nvtxDomainSyncUserAcquireSuccess_impl_fntype nvtxDomainSyncUserAcquireSuccess_impl_fnptr;
|
| 198 |
+
nvtxDomainSyncUserReleasing_impl_fntype nvtxDomainSyncUserReleasing_impl_fnptr;
|
| 199 |
+
|
| 200 |
+
/* Tables of function pointers -- Extra null added to the end to ensure
|
| 201 |
+
* a crash instead of silent corruption if a tool reads off the end. */
|
| 202 |
+
NvtxFunctionPointer* functionTable_CORE [NVTX_CBID_CORE_SIZE + 1];
|
| 203 |
+
NvtxFunctionPointer* functionTable_CUDA [NVTX_CBID_CUDA_SIZE + 1];
|
| 204 |
+
NvtxFunctionPointer* functionTable_OPENCL[NVTX_CBID_OPENCL_SIZE + 1];
|
| 205 |
+
NvtxFunctionPointer* functionTable_CUDART[NVTX_CBID_CUDART_SIZE + 1];
|
| 206 |
+
NvtxFunctionPointer* functionTable_CORE2 [NVTX_CBID_CORE2_SIZE + 1];
|
| 207 |
+
NvtxFunctionPointer* functionTable_SYNC [NVTX_CBID_SYNC_SIZE + 1];
|
| 208 |
+
} nvtxGlobals_t;
|
| 209 |
+
|
| 210 |
+
NVTX_LINKONCE_DEFINE_GLOBAL nvtxGlobals_t NVTX_VERSIONED_IDENTIFIER(nvtxGlobals) =
|
| 211 |
+
{
|
| 212 |
+
NVTX_INIT_STATE_FRESH,
|
| 213 |
+
|
| 214 |
+
{
|
| 215 |
+
sizeof(NvtxExportTableCallbacks),
|
| 216 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxEtiGetModuleFunctionTable)
|
| 217 |
+
},
|
| 218 |
+
{
|
| 219 |
+
sizeof(NvtxExportTableVersionInfo),
|
| 220 |
+
NVTX_VERSION,
|
| 221 |
+
0,
|
| 222 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxEtiSetInjectionNvtxVersion)
|
| 223 |
+
},
|
| 224 |
+
|
| 225 |
+
/* Implementation function pointers */
|
| 226 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxMarkEx_impl_init),
|
| 227 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxMarkA_impl_init),
|
| 228 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxMarkW_impl_init),
|
| 229 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartEx_impl_init),
|
| 230 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartA_impl_init),
|
| 231 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartW_impl_init),
|
| 232 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxRangeEnd_impl_init),
|
| 233 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxRangePushEx_impl_init),
|
| 234 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxRangePushA_impl_init),
|
| 235 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxRangePushW_impl_init),
|
| 236 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxRangePop_impl_init),
|
| 237 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryA_impl_init),
|
| 238 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryW_impl_init),
|
| 239 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadA_impl_init),
|
| 240 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadW_impl_init),
|
| 241 |
+
|
| 242 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceA_impl_init),
|
| 243 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceW_impl_init),
|
| 244 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextA_impl_init),
|
| 245 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextW_impl_init),
|
| 246 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamA_impl_init),
|
| 247 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamW_impl_init),
|
| 248 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventA_impl_init),
|
| 249 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventW_impl_init),
|
| 250 |
+
|
| 251 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceA_impl_init),
|
| 252 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceW_impl_init),
|
| 253 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextA_impl_init),
|
| 254 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextW_impl_init),
|
| 255 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueA_impl_init),
|
| 256 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueW_impl_init),
|
| 257 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectA_impl_init),
|
| 258 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectW_impl_init),
|
| 259 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerA_impl_init),
|
| 260 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerW_impl_init),
|
| 261 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramA_impl_init),
|
| 262 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramW_impl_init),
|
| 263 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventA_impl_init),
|
| 264 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventW_impl_init),
|
| 265 |
+
|
| 266 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceA_impl_init),
|
| 267 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceW_impl_init),
|
| 268 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamA_impl_init),
|
| 269 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamW_impl_init),
|
| 270 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventA_impl_init),
|
| 271 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventW_impl_init),
|
| 272 |
+
|
| 273 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainMarkEx_impl_init),
|
| 274 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeStartEx_impl_init),
|
| 275 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeEnd_impl_init),
|
| 276 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePushEx_impl_init),
|
| 277 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePop_impl_init),
|
| 278 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceCreate_impl_init),
|
| 279 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceDestroy_impl_init),
|
| 280 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryA_impl_init),
|
| 281 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryW_impl_init),
|
| 282 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringA_impl_init),
|
| 283 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringW_impl_init),
|
| 284 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateA_impl_init),
|
| 285 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateW_impl_init),
|
| 286 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainDestroy_impl_init),
|
| 287 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxInitialize_impl_init),
|
| 288 |
+
|
| 289 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserCreate_impl_init),
|
| 290 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserDestroy_impl_init),
|
| 291 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireStart_impl_init),
|
| 292 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireFailed_impl_init),
|
| 293 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireSuccess_impl_init),
|
| 294 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserReleasing_impl_init),
|
| 295 |
+
|
| 296 |
+
/* Tables of function pointers */
|
| 297 |
+
{
|
| 298 |
+
0,
|
| 299 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkEx_impl_fnptr,
|
| 300 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkA_impl_fnptr,
|
| 301 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkW_impl_fnptr,
|
| 302 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartEx_impl_fnptr,
|
| 303 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartA_impl_fnptr,
|
| 304 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartW_impl_fnptr,
|
| 305 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeEnd_impl_fnptr,
|
| 306 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushEx_impl_fnptr,
|
| 307 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushA_impl_fnptr,
|
| 308 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushW_impl_fnptr,
|
| 309 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePop_impl_fnptr,
|
| 310 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryA_impl_fnptr,
|
| 311 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryW_impl_fnptr,
|
| 312 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadA_impl_fnptr,
|
| 313 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadW_impl_fnptr,
|
| 314 |
+
0
|
| 315 |
+
},
|
| 316 |
+
{
|
| 317 |
+
0,
|
| 318 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceA_impl_fnptr,
|
| 319 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceW_impl_fnptr,
|
| 320 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextA_impl_fnptr,
|
| 321 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextW_impl_fnptr,
|
| 322 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamA_impl_fnptr,
|
| 323 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamW_impl_fnptr,
|
| 324 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventA_impl_fnptr,
|
| 325 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventW_impl_fnptr,
|
| 326 |
+
0
|
| 327 |
+
},
|
| 328 |
+
{
|
| 329 |
+
0,
|
| 330 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceA_impl_fnptr,
|
| 331 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceW_impl_fnptr,
|
| 332 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextA_impl_fnptr,
|
| 333 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextW_impl_fnptr,
|
| 334 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueA_impl_fnptr,
|
| 335 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueW_impl_fnptr,
|
| 336 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectA_impl_fnptr,
|
| 337 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectW_impl_fnptr,
|
| 338 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerA_impl_fnptr,
|
| 339 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerW_impl_fnptr,
|
| 340 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramA_impl_fnptr,
|
| 341 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramW_impl_fnptr,
|
| 342 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventA_impl_fnptr,
|
| 343 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventW_impl_fnptr,
|
| 344 |
+
0
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
0,
|
| 348 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceA_impl_fnptr,
|
| 349 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceW_impl_fnptr,
|
| 350 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamA_impl_fnptr,
|
| 351 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamW_impl_fnptr,
|
| 352 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventA_impl_fnptr,
|
| 353 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventW_impl_fnptr,
|
| 354 |
+
0
|
| 355 |
+
},
|
| 356 |
+
{
|
| 357 |
+
0,
|
| 358 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainMarkEx_impl_fnptr,
|
| 359 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeStartEx_impl_fnptr,
|
| 360 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeEnd_impl_fnptr,
|
| 361 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePushEx_impl_fnptr,
|
| 362 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePop_impl_fnptr,
|
| 363 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceCreate_impl_fnptr,
|
| 364 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceDestroy_impl_fnptr,
|
| 365 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryA_impl_fnptr,
|
| 366 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryW_impl_fnptr,
|
| 367 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringA_impl_fnptr,
|
| 368 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringW_impl_fnptr,
|
| 369 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateA_impl_fnptr,
|
| 370 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateW_impl_fnptr,
|
| 371 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainDestroy_impl_fnptr,
|
| 372 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxInitialize_impl_fnptr,
|
| 373 |
+
0
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
0,
|
| 377 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserCreate_impl_fnptr,
|
| 378 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserDestroy_impl_fnptr,
|
| 379 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireStart_impl_fnptr,
|
| 380 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireFailed_impl_fnptr,
|
| 381 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireSuccess_impl_fnptr,
|
| 382 |
+
(NvtxFunctionPointer*)&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserReleasing_impl_fnptr,
|
| 383 |
+
0
|
| 384 |
+
}
|
| 385 |
+
};
|
| 386 |
+
|
| 387 |
+
/* ---- Define static inline implementations of core API functions ---- */
|
| 388 |
+
|
| 389 |
+
#include "nvtxImplCore.h"
|
| 390 |
+
|
| 391 |
+
/* ---- Define implementations of export table functions ---- */
|
| 392 |
+
|
| 393 |
+
NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxEtiGetModuleFunctionTable)(
|
| 394 |
+
NvtxCallbackModule module,
|
| 395 |
+
NvtxFunctionTable* out_table,
|
| 396 |
+
unsigned int* out_size)
|
| 397 |
+
{
|
| 398 |
+
unsigned int bytes = 0;
|
| 399 |
+
NvtxFunctionTable table = (NvtxFunctionTable)0;
|
| 400 |
+
|
| 401 |
+
switch (module)
|
| 402 |
+
{
|
| 403 |
+
case NVTX_CB_MODULE_CORE:
|
| 404 |
+
table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CORE;
|
| 405 |
+
bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CORE);
|
| 406 |
+
break;
|
| 407 |
+
case NVTX_CB_MODULE_CUDA:
|
| 408 |
+
table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CUDA;
|
| 409 |
+
bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CUDA);
|
| 410 |
+
break;
|
| 411 |
+
case NVTX_CB_MODULE_OPENCL:
|
| 412 |
+
table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_OPENCL;
|
| 413 |
+
bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_OPENCL);
|
| 414 |
+
break;
|
| 415 |
+
case NVTX_CB_MODULE_CUDART:
|
| 416 |
+
table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CUDART;
|
| 417 |
+
bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CUDART);
|
| 418 |
+
break;
|
| 419 |
+
case NVTX_CB_MODULE_CORE2:
|
| 420 |
+
table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CORE2;
|
| 421 |
+
bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_CORE2);
|
| 422 |
+
break;
|
| 423 |
+
case NVTX_CB_MODULE_SYNC:
|
| 424 |
+
table = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_SYNC;
|
| 425 |
+
bytes = (unsigned int)sizeof(NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).functionTable_SYNC);
|
| 426 |
+
break;
|
| 427 |
+
default: return 0;
|
| 428 |
+
}
|
| 429 |
+
|
| 430 |
+
if (out_size)
|
| 431 |
+
*out_size = (bytes / (unsigned int)sizeof(NvtxFunctionPointer*)) - 1;
|
| 432 |
+
|
| 433 |
+
if (out_table)
|
| 434 |
+
*out_table = table;
|
| 435 |
+
|
| 436 |
+
return 1;
|
| 437 |
+
}
|
| 438 |
+
|
| 439 |
+
NVTX_LINKONCE_DEFINE_FUNCTION const void* NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxGetExportTable)(uint32_t exportTableId)
|
| 440 |
+
{
|
| 441 |
+
switch (exportTableId)
|
| 442 |
+
{
|
| 443 |
+
case NVTX_ETID_CALLBACKS: return &NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).etblCallbacks;
|
| 444 |
+
case NVTX_ETID_VERSIONINFO: return &NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).etblVersionInfo;
|
| 445 |
+
default: return 0;
|
| 446 |
+
}
|
| 447 |
+
}
|
| 448 |
+
|
| 449 |
+
NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxEtiSetInjectionNvtxVersion)(uint32_t version)
|
| 450 |
+
{
|
| 451 |
+
/* Reserved for custom implementations to resolve problems with tools */
|
| 452 |
+
(void)version;
|
| 453 |
+
}
|
| 454 |
+
|
| 455 |
+
/* ---- Define implementations of init versions of all API functions ---- */
|
| 456 |
+
|
| 457 |
+
#include "nvtxInitDefs.h"
|
| 458 |
+
|
| 459 |
+
/* ---- Define implementations of initialization functions ---- */
|
| 460 |
+
|
| 461 |
+
#include "nvtxInit.h"
|
| 462 |
+
|
| 463 |
+
#ifdef __GNUC__
|
| 464 |
+
#pragma GCC visibility pop
|
| 465 |
+
#endif
|
| 466 |
+
|
| 467 |
+
#ifdef __cplusplus
|
| 468 |
+
} /* extern "C" */
|
| 469 |
+
#endif /* __cplusplus */
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCore.h
ADDED
|
@@ -0,0 +1,299 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
NVTX_DECLSPEC void NVTX_API nvtxMarkEx(const nvtxEventAttributes_t* eventAttrib)
|
| 2 |
+
{
|
| 3 |
+
#ifndef NVTX_DISABLE
|
| 4 |
+
nvtxMarkEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkEx_impl_fnptr;
|
| 5 |
+
if(local!=0)
|
| 6 |
+
(*local)(eventAttrib);
|
| 7 |
+
#endif /*NVTX_DISABLE*/
|
| 8 |
+
}
|
| 9 |
+
|
| 10 |
+
NVTX_DECLSPEC void NVTX_API nvtxMarkA(const char* message)
|
| 11 |
+
{
|
| 12 |
+
#ifndef NVTX_DISABLE
|
| 13 |
+
nvtxMarkA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkA_impl_fnptr;
|
| 14 |
+
if(local!=0)
|
| 15 |
+
(*local)(message);
|
| 16 |
+
#endif /*NVTX_DISABLE*/
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
NVTX_DECLSPEC void NVTX_API nvtxMarkW(const wchar_t* message)
|
| 20 |
+
{
|
| 21 |
+
#ifndef NVTX_DISABLE
|
| 22 |
+
nvtxMarkW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkW_impl_fnptr;
|
| 23 |
+
if(local!=0)
|
| 24 |
+
(*local)(message);
|
| 25 |
+
#endif /*NVTX_DISABLE*/
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartEx(const nvtxEventAttributes_t* eventAttrib)
|
| 29 |
+
{
|
| 30 |
+
#ifndef NVTX_DISABLE
|
| 31 |
+
nvtxRangeStartEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartEx_impl_fnptr;
|
| 32 |
+
if(local!=0)
|
| 33 |
+
return (*local)(eventAttrib);
|
| 34 |
+
else
|
| 35 |
+
#endif /*NVTX_DISABLE*/
|
| 36 |
+
return (nvtxRangeId_t)0;
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartA(const char* message)
|
| 40 |
+
{
|
| 41 |
+
#ifndef NVTX_DISABLE
|
| 42 |
+
nvtxRangeStartA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartA_impl_fnptr;
|
| 43 |
+
if(local!=0)
|
| 44 |
+
return (*local)(message);
|
| 45 |
+
else
|
| 46 |
+
#endif /*NVTX_DISABLE*/
|
| 47 |
+
return (nvtxRangeId_t)0;
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxRangeStartW(const wchar_t* message)
|
| 51 |
+
{
|
| 52 |
+
#ifndef NVTX_DISABLE
|
| 53 |
+
nvtxRangeStartW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartW_impl_fnptr;
|
| 54 |
+
if(local!=0)
|
| 55 |
+
return (*local)(message);
|
| 56 |
+
else
|
| 57 |
+
#endif /*NVTX_DISABLE*/
|
| 58 |
+
return (nvtxRangeId_t)0;
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
NVTX_DECLSPEC void NVTX_API nvtxRangeEnd(nvtxRangeId_t id)
|
| 62 |
+
{
|
| 63 |
+
#ifndef NVTX_DISABLE
|
| 64 |
+
nvtxRangeEnd_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeEnd_impl_fnptr;
|
| 65 |
+
if(local!=0)
|
| 66 |
+
(*local)(id);
|
| 67 |
+
#endif /*NVTX_DISABLE*/
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
NVTX_DECLSPEC int NVTX_API nvtxRangePushEx(const nvtxEventAttributes_t* eventAttrib)
|
| 71 |
+
{
|
| 72 |
+
#ifndef NVTX_DISABLE
|
| 73 |
+
nvtxRangePushEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushEx_impl_fnptr;
|
| 74 |
+
if(local!=0)
|
| 75 |
+
return (*local)(eventAttrib);
|
| 76 |
+
else
|
| 77 |
+
#endif /*NVTX_DISABLE*/
|
| 78 |
+
return (int)NVTX_NO_PUSH_POP_TRACKING;
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
NVTX_DECLSPEC int NVTX_API nvtxRangePushA(const char* message)
|
| 82 |
+
{
|
| 83 |
+
#ifndef NVTX_DISABLE
|
| 84 |
+
nvtxRangePushA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushA_impl_fnptr;
|
| 85 |
+
if(local!=0)
|
| 86 |
+
return (*local)(message);
|
| 87 |
+
else
|
| 88 |
+
#endif /*NVTX_DISABLE*/
|
| 89 |
+
return (int)NVTX_NO_PUSH_POP_TRACKING;
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
NVTX_DECLSPEC int NVTX_API nvtxRangePushW(const wchar_t* message)
|
| 93 |
+
{
|
| 94 |
+
#ifndef NVTX_DISABLE
|
| 95 |
+
nvtxRangePushW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushW_impl_fnptr;
|
| 96 |
+
if(local!=0)
|
| 97 |
+
return (*local)(message);
|
| 98 |
+
else
|
| 99 |
+
#endif /*NVTX_DISABLE*/
|
| 100 |
+
return (int)NVTX_NO_PUSH_POP_TRACKING;
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
NVTX_DECLSPEC int NVTX_API nvtxRangePop(void)
|
| 104 |
+
{
|
| 105 |
+
#ifndef NVTX_DISABLE
|
| 106 |
+
nvtxRangePop_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePop_impl_fnptr;
|
| 107 |
+
if(local!=0)
|
| 108 |
+
return (*local)();
|
| 109 |
+
else
|
| 110 |
+
#endif /*NVTX_DISABLE*/
|
| 111 |
+
return (int)NVTX_NO_PUSH_POP_TRACKING;
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCategoryA(uint32_t category, const char* name)
|
| 115 |
+
{
|
| 116 |
+
#ifndef NVTX_DISABLE
|
| 117 |
+
nvtxNameCategoryA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryA_impl_fnptr;
|
| 118 |
+
if(local!=0)
|
| 119 |
+
(*local)(category, name);
|
| 120 |
+
#endif /*NVTX_DISABLE*/
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCategoryW(uint32_t category, const wchar_t* name)
|
| 124 |
+
{
|
| 125 |
+
#ifndef NVTX_DISABLE
|
| 126 |
+
nvtxNameCategoryW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryW_impl_fnptr;
|
| 127 |
+
if(local!=0)
|
| 128 |
+
(*local)(category, name);
|
| 129 |
+
#endif /*NVTX_DISABLE*/
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadA(uint32_t threadId, const char* name)
|
| 133 |
+
{
|
| 134 |
+
#ifndef NVTX_DISABLE
|
| 135 |
+
nvtxNameOsThreadA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadA_impl_fnptr;
|
| 136 |
+
if(local!=0)
|
| 137 |
+
(*local)(threadId, name);
|
| 138 |
+
#endif /*NVTX_DISABLE*/
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameOsThreadW(uint32_t threadId, const wchar_t* name)
|
| 142 |
+
{
|
| 143 |
+
#ifndef NVTX_DISABLE
|
| 144 |
+
nvtxNameOsThreadW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadW_impl_fnptr;
|
| 145 |
+
if(local!=0)
|
| 146 |
+
(*local)(threadId, name);
|
| 147 |
+
#endif /*NVTX_DISABLE*/
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainMarkEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib)
|
| 151 |
+
{
|
| 152 |
+
#ifndef NVTX_DISABLE
|
| 153 |
+
nvtxDomainMarkEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainMarkEx_impl_fnptr;
|
| 154 |
+
if(local!=0)
|
| 155 |
+
(*local)(domain, eventAttrib);
|
| 156 |
+
#endif /*NVTX_DISABLE*/
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
NVTX_DECLSPEC nvtxRangeId_t NVTX_API nvtxDomainRangeStartEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib)
|
| 160 |
+
{
|
| 161 |
+
#ifndef NVTX_DISABLE
|
| 162 |
+
nvtxDomainRangeStartEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeStartEx_impl_fnptr;
|
| 163 |
+
if(local!=0)
|
| 164 |
+
return (*local)(domain, eventAttrib);
|
| 165 |
+
else
|
| 166 |
+
#endif /*NVTX_DISABLE*/
|
| 167 |
+
return (nvtxRangeId_t)0;
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainRangeEnd(nvtxDomainHandle_t domain, nvtxRangeId_t id)
|
| 171 |
+
{
|
| 172 |
+
#ifndef NVTX_DISABLE
|
| 173 |
+
nvtxDomainRangeEnd_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeEnd_impl_fnptr;
|
| 174 |
+
if(local!=0)
|
| 175 |
+
(*local)(domain, id);
|
| 176 |
+
#endif /*NVTX_DISABLE*/
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
NVTX_DECLSPEC int NVTX_API nvtxDomainRangePushEx(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib)
|
| 180 |
+
{
|
| 181 |
+
#ifndef NVTX_DISABLE
|
| 182 |
+
nvtxDomainRangePushEx_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePushEx_impl_fnptr;
|
| 183 |
+
if(local!=0)
|
| 184 |
+
return (*local)(domain, eventAttrib);
|
| 185 |
+
else
|
| 186 |
+
#endif /*NVTX_DISABLE*/
|
| 187 |
+
return (int)NVTX_NO_PUSH_POP_TRACKING;
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
NVTX_DECLSPEC int NVTX_API nvtxDomainRangePop(nvtxDomainHandle_t domain)
|
| 191 |
+
{
|
| 192 |
+
#ifndef NVTX_DISABLE
|
| 193 |
+
nvtxDomainRangePop_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePop_impl_fnptr;
|
| 194 |
+
if(local!=0)
|
| 195 |
+
return (*local)(domain);
|
| 196 |
+
else
|
| 197 |
+
#endif /*NVTX_DISABLE*/
|
| 198 |
+
return (int)NVTX_NO_PUSH_POP_TRACKING;
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
NVTX_DECLSPEC nvtxResourceHandle_t NVTX_API nvtxDomainResourceCreate(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs)
|
| 202 |
+
{
|
| 203 |
+
#ifndef NVTX_DISABLE
|
| 204 |
+
nvtxDomainResourceCreate_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceCreate_impl_fnptr;
|
| 205 |
+
if(local!=0)
|
| 206 |
+
return (*local)(domain, attribs);
|
| 207 |
+
else
|
| 208 |
+
#endif /*NVTX_DISABLE*/
|
| 209 |
+
return (nvtxResourceHandle_t)0;
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainResourceDestroy(nvtxResourceHandle_t resource)
|
| 213 |
+
{
|
| 214 |
+
#ifndef NVTX_DISABLE
|
| 215 |
+
nvtxDomainResourceDestroy_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceDestroy_impl_fnptr;
|
| 216 |
+
if(local!=0)
|
| 217 |
+
(*local)(resource);
|
| 218 |
+
#endif /*NVTX_DISABLE*/
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryA(nvtxDomainHandle_t domain, uint32_t category, const char* name)
|
| 222 |
+
{
|
| 223 |
+
#ifndef NVTX_DISABLE
|
| 224 |
+
nvtxDomainNameCategoryA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryA_impl_fnptr;
|
| 225 |
+
if(local!=0)
|
| 226 |
+
(*local)(domain, category, name);
|
| 227 |
+
#endif /*NVTX_DISABLE*/
|
| 228 |
+
}
|
| 229 |
+
|
| 230 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainNameCategoryW(nvtxDomainHandle_t domain, uint32_t category, const wchar_t* name)
|
| 231 |
+
{
|
| 232 |
+
#ifndef NVTX_DISABLE
|
| 233 |
+
nvtxDomainNameCategoryW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryW_impl_fnptr;
|
| 234 |
+
if(local!=0)
|
| 235 |
+
(*local)(domain, category, name);
|
| 236 |
+
#endif /*NVTX_DISABLE*/
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringA(nvtxDomainHandle_t domain, const char* string)
|
| 240 |
+
{
|
| 241 |
+
#ifndef NVTX_DISABLE
|
| 242 |
+
nvtxDomainRegisterStringA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringA_impl_fnptr;
|
| 243 |
+
if(local!=0)
|
| 244 |
+
return (*local)(domain, string);
|
| 245 |
+
else
|
| 246 |
+
#endif /*NVTX_DISABLE*/
|
| 247 |
+
return (nvtxStringHandle_t)0;
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
NVTX_DECLSPEC nvtxStringHandle_t NVTX_API nvtxDomainRegisterStringW(nvtxDomainHandle_t domain, const wchar_t* string)
|
| 251 |
+
{
|
| 252 |
+
#ifndef NVTX_DISABLE
|
| 253 |
+
nvtxDomainRegisterStringW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringW_impl_fnptr;
|
| 254 |
+
if(local!=0)
|
| 255 |
+
return (*local)(domain, string);
|
| 256 |
+
else
|
| 257 |
+
#endif /*NVTX_DISABLE*/
|
| 258 |
+
return (nvtxStringHandle_t)0;
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateA(const char* message)
|
| 262 |
+
{
|
| 263 |
+
#ifndef NVTX_DISABLE
|
| 264 |
+
nvtxDomainCreateA_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateA_impl_fnptr;
|
| 265 |
+
if(local!=0)
|
| 266 |
+
return (*local)(message);
|
| 267 |
+
else
|
| 268 |
+
#endif /*NVTX_DISABLE*/
|
| 269 |
+
return (nvtxDomainHandle_t)0;
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
NVTX_DECLSPEC nvtxDomainHandle_t NVTX_API nvtxDomainCreateW(const wchar_t* message)
|
| 273 |
+
{
|
| 274 |
+
#ifndef NVTX_DISABLE
|
| 275 |
+
nvtxDomainCreateW_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateW_impl_fnptr;
|
| 276 |
+
if(local!=0)
|
| 277 |
+
return (*local)(message);
|
| 278 |
+
else
|
| 279 |
+
#endif /*NVTX_DISABLE*/
|
| 280 |
+
return (nvtxDomainHandle_t)0;
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainDestroy(nvtxDomainHandle_t domain)
|
| 284 |
+
{
|
| 285 |
+
#ifndef NVTX_DISABLE
|
| 286 |
+
nvtxDomainDestroy_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainDestroy_impl_fnptr;
|
| 287 |
+
if(local!=0)
|
| 288 |
+
(*local)(domain);
|
| 289 |
+
#endif /*NVTX_DISABLE*/
|
| 290 |
+
}
|
| 291 |
+
|
| 292 |
+
NVTX_DECLSPEC void NVTX_API nvtxInitialize(const void* reserved)
|
| 293 |
+
{
|
| 294 |
+
#ifndef NVTX_DISABLE
|
| 295 |
+
nvtxInitialize_impl_fntype local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxInitialize_impl_fnptr;
|
| 296 |
+
if(local!=0)
|
| 297 |
+
(*local)(reserved);
|
| 298 |
+
#endif /*NVTX_DISABLE*/
|
| 299 |
+
}
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCudaRt_v3.h
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* This file was procedurally generated! Do not modify this file by hand. */
|
| 2 |
+
|
| 3 |
+
/*
|
| 4 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* NOTICE TO USER:
|
| 7 |
+
*
|
| 8 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 9 |
+
* international Copyright laws.
|
| 10 |
+
*
|
| 11 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 12 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 13 |
+
* of a form of NVIDIA software license agreement.
|
| 14 |
+
*
|
| 15 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 16 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 17 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 18 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 19 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 20 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 21 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 22 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 23 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 24 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 25 |
+
*
|
| 26 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 27 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 28 |
+
* "commercial computer software" and "commercial computer software
|
| 29 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 30 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 31 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 32 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 33 |
+
* source code with only those rights set forth herein.
|
| 34 |
+
*
|
| 35 |
+
* Any use of this source code in individual and commercial software must
|
| 36 |
+
* include, in the user documentation and internal comments to the code,
|
| 37 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 38 |
+
*/
|
| 39 |
+
|
| 40 |
+
#ifndef NVTX_IMPL_GUARD_CUDART
|
| 41 |
+
#error Never include this file directly -- it is automatically included by nvToolsExtCudaRt.h (except when NVTX_NO_IMPL is defined).
|
| 42 |
+
#endif
|
| 43 |
+
|
| 44 |
+
#ifdef __cplusplus
|
| 45 |
+
extern "C" {
|
| 46 |
+
#endif /* __cplusplus */
|
| 47 |
+
|
| 48 |
+
//typedef void (NVTX_API * nvtxNameCudaDeviceA_impl_fntype)(int device, const char* name);
|
| 49 |
+
//typedef void (NVTX_API * nvtxNameCudaDeviceW_impl_fntype)(int device, const wchar_t* name);
|
| 50 |
+
typedef void (NVTX_API * nvtxNameCudaStreamA_impl_fntype)(cudaStream_t stream, const char* name);
|
| 51 |
+
typedef void (NVTX_API * nvtxNameCudaStreamW_impl_fntype)(cudaStream_t stream, const wchar_t* name);
|
| 52 |
+
typedef void (NVTX_API * nvtxNameCudaEventA_impl_fntype)(cudaEvent_t event, const char* name);
|
| 53 |
+
typedef void (NVTX_API * nvtxNameCudaEventW_impl_fntype)(cudaEvent_t event, const wchar_t* name);
|
| 54 |
+
|
| 55 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceA(int device, const char* name)
|
| 56 |
+
{
|
| 57 |
+
#ifndef NVTX_DISABLE
|
| 58 |
+
nvtxNameCudaDeviceA_impl_fntype local = (nvtxNameCudaDeviceA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceA_impl_fnptr;
|
| 59 |
+
if(local!=0)
|
| 60 |
+
(*local)(device, name);
|
| 61 |
+
#endif /*NVTX_DISABLE*/
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaDeviceW(int device, const wchar_t* name)
|
| 65 |
+
{
|
| 66 |
+
#ifndef NVTX_DISABLE
|
| 67 |
+
nvtxNameCudaDeviceW_impl_fntype local = (nvtxNameCudaDeviceW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceW_impl_fnptr;
|
| 68 |
+
if(local!=0)
|
| 69 |
+
(*local)(device, name);
|
| 70 |
+
#endif /*NVTX_DISABLE*/
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamA(cudaStream_t stream, const char* name)
|
| 74 |
+
{
|
| 75 |
+
#ifndef NVTX_DISABLE
|
| 76 |
+
nvtxNameCudaStreamA_impl_fntype local = (nvtxNameCudaStreamA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamA_impl_fnptr;
|
| 77 |
+
if(local!=0)
|
| 78 |
+
(*local)(stream, name);
|
| 79 |
+
#endif /*NVTX_DISABLE*/
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaStreamW(cudaStream_t stream, const wchar_t* name)
|
| 83 |
+
{
|
| 84 |
+
#ifndef NVTX_DISABLE
|
| 85 |
+
nvtxNameCudaStreamW_impl_fntype local = (nvtxNameCudaStreamW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamW_impl_fnptr;
|
| 86 |
+
if(local!=0)
|
| 87 |
+
(*local)(stream, name);
|
| 88 |
+
#endif /*NVTX_DISABLE*/
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventA(cudaEvent_t event, const char* name)
|
| 92 |
+
{
|
| 93 |
+
#ifndef NVTX_DISABLE
|
| 94 |
+
nvtxNameCudaEventA_impl_fntype local = (nvtxNameCudaEventA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventA_impl_fnptr;
|
| 95 |
+
if(local!=0)
|
| 96 |
+
(*local)(event, name);
|
| 97 |
+
#endif /*NVTX_DISABLE*/
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCudaEventW(cudaEvent_t event, const wchar_t* name)
|
| 101 |
+
{
|
| 102 |
+
#ifndef NVTX_DISABLE
|
| 103 |
+
nvtxNameCudaEventW_impl_fntype local = (nvtxNameCudaEventW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventW_impl_fnptr;
|
| 104 |
+
if(local!=0)
|
| 105 |
+
(*local)(event, name);
|
| 106 |
+
#endif /*NVTX_DISABLE*/
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
#ifdef __cplusplus
|
| 110 |
+
} /* extern "C" */
|
| 111 |
+
#endif /* __cplusplus */
|
| 112 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplCuda_v3.h
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* This file was procedurally generated! Do not modify this file by hand. */
|
| 2 |
+
|
| 3 |
+
/*
|
| 4 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* NOTICE TO USER:
|
| 7 |
+
*
|
| 8 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 9 |
+
* international Copyright laws.
|
| 10 |
+
*
|
| 11 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 12 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 13 |
+
* of a form of NVIDIA software license agreement.
|
| 14 |
+
*
|
| 15 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 16 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 17 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 18 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 19 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 20 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 21 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 22 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 23 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 24 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 25 |
+
*
|
| 26 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 27 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 28 |
+
* "commercial computer software" and "commercial computer software
|
| 29 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 30 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 31 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 32 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 33 |
+
* source code with only those rights set forth herein.
|
| 34 |
+
*
|
| 35 |
+
* Any use of this source code in individual and commercial software must
|
| 36 |
+
* include, in the user documentation and internal comments to the code,
|
| 37 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 38 |
+
*/
|
| 39 |
+
|
| 40 |
+
#ifndef NVTX_IMPL_GUARD_CUDA
|
| 41 |
+
#error Never include this file directly -- it is automatically included by nvToolsExtCuda.h (except when NVTX_NO_IMPL is defined).
|
| 42 |
+
#endif
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
#ifdef __cplusplus
|
| 46 |
+
extern "C" {
|
| 47 |
+
#endif /* __cplusplus */
|
| 48 |
+
|
| 49 |
+
typedef void (NVTX_API * nvtxNameCuDeviceA_impl_fntype)(CUdevice device, const char* name);
|
| 50 |
+
typedef void (NVTX_API * nvtxNameCuDeviceW_impl_fntype)(CUdevice device, const wchar_t* name);
|
| 51 |
+
typedef void (NVTX_API * nvtxNameCuContextA_impl_fntype)(CUcontext context, const char* name);
|
| 52 |
+
typedef void (NVTX_API * nvtxNameCuContextW_impl_fntype)(CUcontext context, const wchar_t* name);
|
| 53 |
+
typedef void (NVTX_API * nvtxNameCuStreamA_impl_fntype)(CUstream stream, const char* name);
|
| 54 |
+
typedef void (NVTX_API * nvtxNameCuStreamW_impl_fntype)(CUstream stream, const wchar_t* name);
|
| 55 |
+
typedef void (NVTX_API * nvtxNameCuEventA_impl_fntype)(CUevent event, const char* name);
|
| 56 |
+
typedef void (NVTX_API * nvtxNameCuEventW_impl_fntype)(CUevent event, const wchar_t* name);
|
| 57 |
+
|
| 58 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceA(CUdevice device, const char* name)
|
| 59 |
+
{
|
| 60 |
+
#ifndef NVTX_DISABLE
|
| 61 |
+
nvtxNameCuDeviceA_impl_fntype local = (nvtxNameCuDeviceA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceA_impl_fnptr;
|
| 62 |
+
if(local!=0)
|
| 63 |
+
(*local)(device, name);
|
| 64 |
+
#endif /*NVTX_DISABLE*/
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuDeviceW(CUdevice device, const wchar_t* name)
|
| 68 |
+
{
|
| 69 |
+
#ifndef NVTX_DISABLE
|
| 70 |
+
nvtxNameCuDeviceW_impl_fntype local = (nvtxNameCuDeviceW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceW_impl_fnptr;
|
| 71 |
+
if(local!=0)
|
| 72 |
+
(*local)(device, name);
|
| 73 |
+
#endif /*NVTX_DISABLE*/
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuContextA(CUcontext context, const char* name)
|
| 77 |
+
{
|
| 78 |
+
#ifndef NVTX_DISABLE
|
| 79 |
+
nvtxNameCuContextA_impl_fntype local = (nvtxNameCuContextA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextA_impl_fnptr;
|
| 80 |
+
if(local!=0)
|
| 81 |
+
(*local)(context, name);
|
| 82 |
+
#endif /*NVTX_DISABLE*/
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuContextW(CUcontext context, const wchar_t* name)
|
| 86 |
+
{
|
| 87 |
+
#ifndef NVTX_DISABLE
|
| 88 |
+
nvtxNameCuContextW_impl_fntype local = (nvtxNameCuContextW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextW_impl_fnptr;
|
| 89 |
+
if(local!=0)
|
| 90 |
+
(*local)(context, name);
|
| 91 |
+
#endif /*NVTX_DISABLE*/
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamA(CUstream stream, const char* name)
|
| 95 |
+
{
|
| 96 |
+
#ifndef NVTX_DISABLE
|
| 97 |
+
nvtxNameCuStreamA_impl_fntype local = (nvtxNameCuStreamA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamA_impl_fnptr;
|
| 98 |
+
if(local!=0)
|
| 99 |
+
(*local)(stream, name);
|
| 100 |
+
#endif /*NVTX_DISABLE*/
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuStreamW(CUstream stream, const wchar_t* name)
|
| 104 |
+
{
|
| 105 |
+
#ifndef NVTX_DISABLE
|
| 106 |
+
nvtxNameCuStreamW_impl_fntype local = (nvtxNameCuStreamW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamW_impl_fnptr;
|
| 107 |
+
if(local!=0)
|
| 108 |
+
(*local)(stream, name);
|
| 109 |
+
#endif /*NVTX_DISABLE*/
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuEventA(CUevent event, const char* name)
|
| 113 |
+
{
|
| 114 |
+
#ifndef NVTX_DISABLE
|
| 115 |
+
nvtxNameCuEventA_impl_fntype local = (nvtxNameCuEventA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventA_impl_fnptr;
|
| 116 |
+
if(local!=0)
|
| 117 |
+
(*local)(event, name);
|
| 118 |
+
#endif /*NVTX_DISABLE*/
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameCuEventW(CUevent event, const wchar_t* name)
|
| 122 |
+
{
|
| 123 |
+
#ifndef NVTX_DISABLE
|
| 124 |
+
nvtxNameCuEventW_impl_fntype local = (nvtxNameCuEventW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventW_impl_fnptr;
|
| 125 |
+
if(local!=0)
|
| 126 |
+
(*local)(event, name);
|
| 127 |
+
#endif /*NVTX_DISABLE*/
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
#ifdef __cplusplus
|
| 131 |
+
} /* extern "C" */
|
| 132 |
+
#endif /* __cplusplus */
|
| 133 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplOpenCL_v3.h
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* This file was procedurally generated! Do not modify this file by hand. */
|
| 2 |
+
|
| 3 |
+
/*
|
| 4 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* NOTICE TO USER:
|
| 7 |
+
*
|
| 8 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 9 |
+
* international Copyright laws.
|
| 10 |
+
*
|
| 11 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 12 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 13 |
+
* of a form of NVIDIA software license agreement.
|
| 14 |
+
*
|
| 15 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 16 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 17 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 18 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 19 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 20 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 21 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 22 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 23 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 24 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 25 |
+
*
|
| 26 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 27 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 28 |
+
* "commercial computer software" and "commercial computer software
|
| 29 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 30 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 31 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 32 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 33 |
+
* source code with only those rights set forth herein.
|
| 34 |
+
*
|
| 35 |
+
* Any use of this source code in individual and commercial software must
|
| 36 |
+
* include, in the user documentation and internal comments to the code,
|
| 37 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 38 |
+
*/
|
| 39 |
+
|
| 40 |
+
#ifndef NVTX_IMPL_GUARD_OPENCL
|
| 41 |
+
#error Never include this file directly -- it is automatically included by nvToolsExtCuda.h (except when NVTX_NO_IMPL is defined).
|
| 42 |
+
#endif
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
#ifdef __cplusplus
|
| 46 |
+
extern "C" {
|
| 47 |
+
#endif /* __cplusplus */
|
| 48 |
+
|
| 49 |
+
typedef void (NVTX_API * nvtxNameClDeviceA_impl_fntype)(cl_device_id device, const char* name);
|
| 50 |
+
typedef void (NVTX_API * nvtxNameClDeviceW_impl_fntype)(cl_device_id device, const wchar_t* name);
|
| 51 |
+
typedef void (NVTX_API * nvtxNameClContextA_impl_fntype)(cl_context context, const char* name);
|
| 52 |
+
typedef void (NVTX_API * nvtxNameClContextW_impl_fntype)(cl_context context, const wchar_t* name);
|
| 53 |
+
typedef void (NVTX_API * nvtxNameClCommandQueueA_impl_fntype)(cl_command_queue command_queue, const char* name);
|
| 54 |
+
typedef void (NVTX_API * nvtxNameClCommandQueueW_impl_fntype)(cl_command_queue command_queue, const wchar_t* name);
|
| 55 |
+
typedef void (NVTX_API * nvtxNameClMemObjectA_impl_fntype)(cl_mem memobj, const char* name);
|
| 56 |
+
typedef void (NVTX_API * nvtxNameClMemObjectW_impl_fntype)(cl_mem memobj, const wchar_t* name);
|
| 57 |
+
typedef void (NVTX_API * nvtxNameClSamplerA_impl_fntype)(cl_sampler sampler, const char* name);
|
| 58 |
+
typedef void (NVTX_API * nvtxNameClSamplerW_impl_fntype)(cl_sampler sampler, const wchar_t* name);
|
| 59 |
+
typedef void (NVTX_API * nvtxNameClProgramA_impl_fntype)(cl_program program, const char* name);
|
| 60 |
+
typedef void (NVTX_API * nvtxNameClProgramW_impl_fntype)(cl_program program, const wchar_t* name);
|
| 61 |
+
typedef void (NVTX_API * nvtxNameClEventA_impl_fntype)(cl_event evnt, const char* name);
|
| 62 |
+
typedef void (NVTX_API * nvtxNameClEventW_impl_fntype)(cl_event evnt, const wchar_t* name);
|
| 63 |
+
|
| 64 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceA(cl_device_id device, const char* name)
|
| 65 |
+
{
|
| 66 |
+
#ifndef NVTX_DISABLE
|
| 67 |
+
nvtxNameClDeviceA_impl_fntype local = (nvtxNameClDeviceA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceA_impl_fnptr;
|
| 68 |
+
if(local!=0)
|
| 69 |
+
(*local)(device, name);
|
| 70 |
+
#endif /*NVTX_DISABLE*/
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClDeviceW(cl_device_id device, const wchar_t* name)
|
| 74 |
+
{
|
| 75 |
+
#ifndef NVTX_DISABLE
|
| 76 |
+
nvtxNameClDeviceW_impl_fntype local = (nvtxNameClDeviceW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceW_impl_fnptr;
|
| 77 |
+
if(local!=0)
|
| 78 |
+
(*local)(device, name);
|
| 79 |
+
#endif /*NVTX_DISABLE*/
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClContextA(cl_context context, const char* name)
|
| 83 |
+
{
|
| 84 |
+
#ifndef NVTX_DISABLE
|
| 85 |
+
nvtxNameClContextA_impl_fntype local = (nvtxNameClContextA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextA_impl_fnptr;
|
| 86 |
+
if(local!=0)
|
| 87 |
+
(*local)(context, name);
|
| 88 |
+
#endif /*NVTX_DISABLE*/
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClContextW(cl_context context, const wchar_t* name)
|
| 92 |
+
{
|
| 93 |
+
#ifndef NVTX_DISABLE
|
| 94 |
+
nvtxNameClContextW_impl_fntype local = (nvtxNameClContextW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextW_impl_fnptr;
|
| 95 |
+
if(local!=0)
|
| 96 |
+
(*local)(context, name);
|
| 97 |
+
#endif /*NVTX_DISABLE*/
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueA(cl_command_queue command_queue, const char* name)
|
| 101 |
+
{
|
| 102 |
+
#ifndef NVTX_DISABLE
|
| 103 |
+
nvtxNameClCommandQueueA_impl_fntype local = (nvtxNameClCommandQueueA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueA_impl_fnptr;
|
| 104 |
+
if(local!=0)
|
| 105 |
+
(*local)(command_queue, name);
|
| 106 |
+
#endif /*NVTX_DISABLE*/
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClCommandQueueW(cl_command_queue command_queue, const wchar_t* name)
|
| 110 |
+
{
|
| 111 |
+
#ifndef NVTX_DISABLE
|
| 112 |
+
nvtxNameClCommandQueueW_impl_fntype local = (nvtxNameClCommandQueueW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueW_impl_fnptr;
|
| 113 |
+
if(local!=0)
|
| 114 |
+
(*local)(command_queue, name);
|
| 115 |
+
#endif /*NVTX_DISABLE*/
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectA(cl_mem memobj, const char* name)
|
| 119 |
+
{
|
| 120 |
+
#ifndef NVTX_DISABLE
|
| 121 |
+
nvtxNameClMemObjectA_impl_fntype local = (nvtxNameClMemObjectA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectA_impl_fnptr;
|
| 122 |
+
if(local!=0)
|
| 123 |
+
(*local)(memobj, name);
|
| 124 |
+
#endif /*NVTX_DISABLE*/
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClMemObjectW(cl_mem memobj, const wchar_t* name)
|
| 128 |
+
{
|
| 129 |
+
#ifndef NVTX_DISABLE
|
| 130 |
+
nvtxNameClMemObjectW_impl_fntype local = (nvtxNameClMemObjectW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectW_impl_fnptr;
|
| 131 |
+
if(local!=0)
|
| 132 |
+
(*local)(memobj, name);
|
| 133 |
+
#endif /*NVTX_DISABLE*/
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerA(cl_sampler sampler, const char* name)
|
| 137 |
+
{
|
| 138 |
+
#ifndef NVTX_DISABLE
|
| 139 |
+
nvtxNameClSamplerA_impl_fntype local = (nvtxNameClSamplerA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerA_impl_fnptr;
|
| 140 |
+
if(local!=0)
|
| 141 |
+
(*local)(sampler, name);
|
| 142 |
+
#endif /*NVTX_DISABLE*/
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClSamplerW(cl_sampler sampler, const wchar_t* name)
|
| 146 |
+
{
|
| 147 |
+
#ifndef NVTX_DISABLE
|
| 148 |
+
nvtxNameClSamplerW_impl_fntype local = (nvtxNameClSamplerW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerW_impl_fnptr;
|
| 149 |
+
if(local!=0)
|
| 150 |
+
(*local)(sampler, name);
|
| 151 |
+
#endif /*NVTX_DISABLE*/
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClProgramA(cl_program program, const char* name)
|
| 155 |
+
{
|
| 156 |
+
#ifndef NVTX_DISABLE
|
| 157 |
+
nvtxNameClProgramA_impl_fntype local = (nvtxNameClProgramA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramA_impl_fnptr;
|
| 158 |
+
if(local!=0)
|
| 159 |
+
(*local)(program, name);
|
| 160 |
+
#endif /*NVTX_DISABLE*/
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClProgramW(cl_program program, const wchar_t* name)
|
| 164 |
+
{
|
| 165 |
+
#ifndef NVTX_DISABLE
|
| 166 |
+
nvtxNameClProgramW_impl_fntype local = (nvtxNameClProgramW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramW_impl_fnptr;
|
| 167 |
+
if(local!=0)
|
| 168 |
+
(*local)(program, name);
|
| 169 |
+
#endif /*NVTX_DISABLE*/
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClEventA(cl_event evnt, const char* name)
|
| 173 |
+
{
|
| 174 |
+
#ifndef NVTX_DISABLE
|
| 175 |
+
nvtxNameClEventA_impl_fntype local = (nvtxNameClEventA_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventA_impl_fnptr;
|
| 176 |
+
if(local!=0)
|
| 177 |
+
(*local)(evnt, name);
|
| 178 |
+
#endif /*NVTX_DISABLE*/
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
NVTX_DECLSPEC void NVTX_API nvtxNameClEventW(cl_event evnt, const wchar_t* name)
|
| 182 |
+
{
|
| 183 |
+
#ifndef NVTX_DISABLE
|
| 184 |
+
nvtxNameClEventW_impl_fntype local = (nvtxNameClEventW_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventW_impl_fnptr;
|
| 185 |
+
if(local!=0)
|
| 186 |
+
(*local)(evnt, name);
|
| 187 |
+
#endif /*NVTX_DISABLE*/
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
#ifdef __cplusplus
|
| 191 |
+
} /* extern "C" */
|
| 192 |
+
#endif /* __cplusplus */
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxImplSync_v3.h
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* This file was procedurally generated! Do not modify this file by hand. */
|
| 2 |
+
|
| 3 |
+
/*
|
| 4 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* NOTICE TO USER:
|
| 7 |
+
*
|
| 8 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 9 |
+
* international Copyright laws.
|
| 10 |
+
*
|
| 11 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 12 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 13 |
+
* of a form of NVIDIA software license agreement.
|
| 14 |
+
*
|
| 15 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 16 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 17 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 18 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 19 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 20 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 21 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 22 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 23 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 24 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 25 |
+
*
|
| 26 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 27 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 28 |
+
* "commercial computer software" and "commercial computer software
|
| 29 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 30 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 31 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 32 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 33 |
+
* source code with only those rights set forth herein.
|
| 34 |
+
*
|
| 35 |
+
* Any use of this source code in individual and commercial software must
|
| 36 |
+
* include, in the user documentation and internal comments to the code,
|
| 37 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 38 |
+
*/
|
| 39 |
+
|
| 40 |
+
#ifndef NVTX_IMPL_GUARD_SYNC
|
| 41 |
+
#error Never include this file directly -- it is automatically included by nvToolsExtCuda.h (except when NVTX_NO_IMPL is defined).
|
| 42 |
+
#endif
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
#ifdef __cplusplus
|
| 46 |
+
extern "C" {
|
| 47 |
+
#endif /* __cplusplus */
|
| 48 |
+
|
| 49 |
+
typedef nvtxSyncUser_t (NVTX_API * nvtxDomainSyncUserCreate_impl_fntype)(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs);
|
| 50 |
+
typedef void (NVTX_API * nvtxDomainSyncUserDestroy_impl_fntype)(nvtxSyncUser_t handle);
|
| 51 |
+
typedef void (NVTX_API * nvtxDomainSyncUserAcquireStart_impl_fntype)(nvtxSyncUser_t handle);
|
| 52 |
+
typedef void (NVTX_API * nvtxDomainSyncUserAcquireFailed_impl_fntype)(nvtxSyncUser_t handle);
|
| 53 |
+
typedef void (NVTX_API * nvtxDomainSyncUserAcquireSuccess_impl_fntype)(nvtxSyncUser_t handle);
|
| 54 |
+
typedef void (NVTX_API * nvtxDomainSyncUserReleasing_impl_fntype)(nvtxSyncUser_t handle);
|
| 55 |
+
|
| 56 |
+
NVTX_DECLSPEC nvtxSyncUser_t NVTX_API nvtxDomainSyncUserCreate(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs)
|
| 57 |
+
{
|
| 58 |
+
#ifndef NVTX_DISABLE
|
| 59 |
+
nvtxDomainSyncUserCreate_impl_fntype local = (nvtxDomainSyncUserCreate_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserCreate_impl_fnptr;
|
| 60 |
+
if(local!=0)
|
| 61 |
+
return (*local)(domain, attribs);
|
| 62 |
+
else
|
| 63 |
+
#endif /*NVTX_DISABLE*/
|
| 64 |
+
return (nvtxSyncUser_t)0;
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserDestroy(nvtxSyncUser_t handle)
|
| 68 |
+
{
|
| 69 |
+
#ifndef NVTX_DISABLE
|
| 70 |
+
nvtxDomainSyncUserDestroy_impl_fntype local = (nvtxDomainSyncUserDestroy_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserDestroy_impl_fnptr;
|
| 71 |
+
if(local!=0)
|
| 72 |
+
(*local)(handle);
|
| 73 |
+
#endif /*NVTX_DISABLE*/
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireStart(nvtxSyncUser_t handle)
|
| 77 |
+
{
|
| 78 |
+
#ifndef NVTX_DISABLE
|
| 79 |
+
nvtxDomainSyncUserAcquireStart_impl_fntype local = (nvtxDomainSyncUserAcquireStart_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireStart_impl_fnptr;
|
| 80 |
+
if(local!=0)
|
| 81 |
+
(*local)(handle);
|
| 82 |
+
#endif /*NVTX_DISABLE*/
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireFailed(nvtxSyncUser_t handle)
|
| 86 |
+
{
|
| 87 |
+
#ifndef NVTX_DISABLE
|
| 88 |
+
nvtxDomainSyncUserAcquireFailed_impl_fntype local = (nvtxDomainSyncUserAcquireFailed_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireFailed_impl_fnptr;
|
| 89 |
+
if(local!=0)
|
| 90 |
+
(*local)(handle);
|
| 91 |
+
#endif /*NVTX_DISABLE*/
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserAcquireSuccess(nvtxSyncUser_t handle)
|
| 95 |
+
{
|
| 96 |
+
#ifndef NVTX_DISABLE
|
| 97 |
+
nvtxDomainSyncUserAcquireSuccess_impl_fntype local = (nvtxDomainSyncUserAcquireSuccess_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireSuccess_impl_fnptr;
|
| 98 |
+
if(local!=0)
|
| 99 |
+
(*local)(handle);
|
| 100 |
+
#endif /*NVTX_DISABLE*/
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
NVTX_DECLSPEC void NVTX_API nvtxDomainSyncUserReleasing(nvtxSyncUser_t handle)
|
| 104 |
+
{
|
| 105 |
+
#ifndef NVTX_DISABLE
|
| 106 |
+
nvtxDomainSyncUserReleasing_impl_fntype local = (nvtxDomainSyncUserReleasing_impl_fntype)NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserReleasing_impl_fnptr;
|
| 107 |
+
if(local!=0)
|
| 108 |
+
(*local)(handle);
|
| 109 |
+
#endif /*NVTX_DISABLE*/
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
#ifdef __cplusplus
|
| 113 |
+
} /* extern "C" */
|
| 114 |
+
#endif /* __cplusplus */
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInit.h
ADDED
|
@@ -0,0 +1,343 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* This file was procedurally generated! Do not modify this file by hand. */
|
| 2 |
+
|
| 3 |
+
/*
|
| 4 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* NOTICE TO USER:
|
| 7 |
+
*
|
| 8 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 9 |
+
* international Copyright laws.
|
| 10 |
+
*
|
| 11 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 12 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 13 |
+
* of a form of NVIDIA software license agreement.
|
| 14 |
+
*
|
| 15 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 16 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 17 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 18 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 19 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 20 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 21 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 22 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 23 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 24 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 25 |
+
*
|
| 26 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 27 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 28 |
+
* "commercial computer software" and "commercial computer software
|
| 29 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 30 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 31 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 32 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 33 |
+
* source code with only those rights set forth herein.
|
| 34 |
+
*
|
| 35 |
+
* Any use of this source code in individual and commercial software must
|
| 36 |
+
* include, in the user documentation and internal comments to the code,
|
| 37 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 38 |
+
*/
|
| 39 |
+
|
| 40 |
+
#ifndef NVTX_IMPL_GUARD
|
| 41 |
+
#error Never include this file directly -- it is automatically included by nvToolsExt.h (except when NVTX_NO_IMPL is defined).
|
| 42 |
+
#endif
|
| 43 |
+
|
| 44 |
+
/* ---- Platform-independent helper definitions and functions ---- */
|
| 45 |
+
|
| 46 |
+
/* Prefer macros over inline functions to reduce symbol resolution at link time */
|
| 47 |
+
|
| 48 |
+
#if defined(_WIN32)
|
| 49 |
+
#define NVTX_PATHCHAR wchar_t
|
| 50 |
+
#define NVTX_STR(x) L##x
|
| 51 |
+
#define NVTX_GETENV _wgetenv
|
| 52 |
+
#define NVTX_BUFSIZE MAX_PATH
|
| 53 |
+
#define NVTX_DLLHANDLE HMODULE
|
| 54 |
+
#define NVTX_DLLOPEN(x) LoadLibraryW(x)
|
| 55 |
+
#define NVTX_DLLFUNC GetProcAddress
|
| 56 |
+
#define NVTX_DLLCLOSE FreeLibrary
|
| 57 |
+
#define NVTX_YIELD() SwitchToThread()
|
| 58 |
+
#define NVTX_MEMBAR() MemoryBarrier()
|
| 59 |
+
#define NVTX_ATOMIC_WRITE_32(address, value) InterlockedExchange((volatile LONG*)address, value)
|
| 60 |
+
#define NVTX_ATOMIC_CAS_32(old, address, exchange, comparand) old = InterlockedCompareExchange((volatile LONG*)address, exchange, comparand)
|
| 61 |
+
#elif defined(__GNUC__)
|
| 62 |
+
#define NVTX_PATHCHAR char
|
| 63 |
+
#define NVTX_STR(x) x
|
| 64 |
+
#define NVTX_GETENV getenv
|
| 65 |
+
#define NVTX_BUFSIZE PATH_MAX
|
| 66 |
+
#define NVTX_DLLHANDLE void*
|
| 67 |
+
#define NVTX_DLLOPEN(x) dlopen(x, RTLD_LAZY)
|
| 68 |
+
#define NVTX_DLLFUNC dlsym
|
| 69 |
+
#define NVTX_DLLCLOSE dlclose
|
| 70 |
+
#define NVTX_YIELD() sched_yield()
|
| 71 |
+
#define NVTX_MEMBAR() __sync_synchronize()
|
| 72 |
+
/* Ensure full memory barrier for atomics, to match Windows functions */
|
| 73 |
+
#define NVTX_ATOMIC_WRITE_32(address, value) __sync_synchronize(); __sync_lock_test_and_set(address, value)
|
| 74 |
+
#define NVTX_ATOMIC_CAS_32(old, address, exchange, comparand) __sync_synchronize(); old = __sync_val_compare_and_swap(address, exchange, comparand)
|
| 75 |
+
#else
|
| 76 |
+
#error The library does not support your configuration!
|
| 77 |
+
#endif
|
| 78 |
+
|
| 79 |
+
/* Define this to 1 for platforms that where pre-injected libraries can be discovered. */
|
| 80 |
+
#if defined(_WIN32)
|
| 81 |
+
/* TODO */
|
| 82 |
+
#define NVTX_SUPPORT_ALREADY_INJECTED_LIBRARY 0
|
| 83 |
+
#else
|
| 84 |
+
#define NVTX_SUPPORT_ALREADY_INJECTED_LIBRARY 0
|
| 85 |
+
#endif
|
| 86 |
+
|
| 87 |
+
/* Define this to 1 for platforms that support environment variables */
|
| 88 |
+
/* TODO: Detect UWP, a.k.a. Windows Store app, and set this to 0. */
|
| 89 |
+
/* Try: #if defined(WINAPI_FAMILY_PARTITION) && WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) */
|
| 90 |
+
#define NVTX_SUPPORT_ENV_VARS 1
|
| 91 |
+
|
| 92 |
+
/* Define this to 1 for platforms that support dynamic/shared libraries */
|
| 93 |
+
#define NVTX_SUPPORT_DYNAMIC_INJECTION_LIBRARY 1
|
| 94 |
+
|
| 95 |
+
/* Injection libraries implementing InitializeInjectionNvtx2 may be statically linked,
|
| 96 |
+
* and this will override any dynamic injection. Useful for platforms where dynamic
|
| 97 |
+
* injection is not available. Since weak symbols not explicitly marked extern are
|
| 98 |
+
* guaranteed to be initialized to zero if no definitions are found by the linker, the
|
| 99 |
+
* dynamic injection process proceeds normally if pfnInitializeInjectionNvtx2 is 0. */
|
| 100 |
+
#if defined(__GNUC__) && !defined(_WIN32) && !defined(__CYGWIN__)
|
| 101 |
+
#define NVTX_SUPPORT_STATIC_INJECTION_LIBRARY 1
|
| 102 |
+
/* To statically inject an NVTX library, define InitializeInjectionNvtx2_fnptr as a normal
|
| 103 |
+
* symbol (not weak) pointing to the implementation of InitializeInjectionNvtx2 (which
|
| 104 |
+
* does not need to be named "InitializeInjectionNvtx2" as is necessary in a dynamic
|
| 105 |
+
* injection library. */
|
| 106 |
+
__attribute__((weak)) NvtxInitializeInjectionNvtxFunc_t InitializeInjectionNvtx2_fnptr;
|
| 107 |
+
#else
|
| 108 |
+
#define NVTX_SUPPORT_STATIC_INJECTION_LIBRARY 0
|
| 109 |
+
#endif
|
| 110 |
+
|
| 111 |
+
/* This function tries to find or load an NVTX injection library and get the
|
| 112 |
+
* address of its InitializeInjection2 function. If such a function pointer
|
| 113 |
+
* is found, it is called, and passed the address of this NVTX instance's
|
| 114 |
+
* nvtxGetExportTable function, so the injection can attach to this instance.
|
| 115 |
+
* If the initialization fails for any reason, any dynamic library loaded will
|
| 116 |
+
* be freed, and all NVTX implementation functions will be set to no-ops. If
|
| 117 |
+
* initialization succeeds, NVTX functions not attached to the tool will be set
|
| 118 |
+
* to no-ops. This is implemented as one function instead of several small
|
| 119 |
+
* functions to minimize the number of weak symbols the linker must resolve.
|
| 120 |
+
* Order of search is:
|
| 121 |
+
* - Pre-injected library exporting InitializeInjectionNvtx2
|
| 122 |
+
* - Loadable library exporting InitializeInjectionNvtx2
|
| 123 |
+
* - Path specified by env var NVTX_INJECTION??_PATH (?? is 32 or 64)
|
| 124 |
+
* - On Android, libNvtxInjection??.so within the package (?? is 32 or 64)
|
| 125 |
+
* - Statically-linked injection library defining InitializeInjectionNvtx2_fnptr
|
| 126 |
+
*/
|
| 127 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_VERSIONED_IDENTIFIER(nvtxInitializeInjectionLibrary)(void);
|
| 128 |
+
NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_VERSIONED_IDENTIFIER(nvtxInitializeInjectionLibrary)(void)
|
| 129 |
+
{
|
| 130 |
+
const char* const initFuncName = "InitializeInjectionNvtx2";
|
| 131 |
+
NvtxInitializeInjectionNvtxFunc_t init_fnptr = (NvtxInitializeInjectionNvtxFunc_t)0;
|
| 132 |
+
NVTX_DLLHANDLE injectionLibraryHandle = (NVTX_DLLHANDLE)0;
|
| 133 |
+
int entryPointStatus = 0;
|
| 134 |
+
|
| 135 |
+
#if NVTX_SUPPORT_ALREADY_INJECTED_LIBRARY
|
| 136 |
+
/* Use POSIX global symbol chain to query for init function from any module */
|
| 137 |
+
init_fnptr = (NvtxInitializeInjectionNvtxFunc_t)NVTX_DLLFUNC(0, initFuncName);
|
| 138 |
+
#endif
|
| 139 |
+
|
| 140 |
+
#if NVTX_SUPPORT_DYNAMIC_INJECTION_LIBRARY
|
| 141 |
+
/* Try discovering dynamic injection library to load */
|
| 142 |
+
if (!init_fnptr)
|
| 143 |
+
{
|
| 144 |
+
#if NVTX_SUPPORT_ENV_VARS
|
| 145 |
+
/* If env var NVTX_INJECTION64_PATH is set, it should contain the path
|
| 146 |
+
* to a 64-bit dynamic NVTX injection library (and similar for 32-bit). */
|
| 147 |
+
const NVTX_PATHCHAR* const nvtxEnvVarName = (sizeof(void*) == 4)
|
| 148 |
+
? NVTX_STR("NVTX_INJECTION32_PATH")
|
| 149 |
+
: NVTX_STR("NVTX_INJECTION64_PATH");
|
| 150 |
+
#endif /* NVTX_SUPPORT_ENV_VARS */
|
| 151 |
+
NVTX_PATHCHAR injectionLibraryPathBuf[NVTX_BUFSIZE];
|
| 152 |
+
const NVTX_PATHCHAR* injectionLibraryPath = (const NVTX_PATHCHAR*)0;
|
| 153 |
+
|
| 154 |
+
/* Refer to this variable explicitly in case all references to it are #if'ed out */
|
| 155 |
+
(void)injectionLibraryPathBuf;
|
| 156 |
+
|
| 157 |
+
#if NVTX_SUPPORT_ENV_VARS
|
| 158 |
+
/* Disable the warning for getenv & _wgetenv -- this usage is safe because
|
| 159 |
+
* these functions are not called again before using the returned value. */
|
| 160 |
+
#if defined(_MSC_VER)
|
| 161 |
+
#pragma warning( push )
|
| 162 |
+
#pragma warning( disable : 4996 )
|
| 163 |
+
#endif
|
| 164 |
+
injectionLibraryPath = NVTX_GETENV(nvtxEnvVarName);
|
| 165 |
+
#if defined(_MSC_VER)
|
| 166 |
+
#pragma warning( pop )
|
| 167 |
+
#endif
|
| 168 |
+
#endif
|
| 169 |
+
|
| 170 |
+
#if defined(__ANDROID__)
|
| 171 |
+
if (!injectionLibraryPath)
|
| 172 |
+
{
|
| 173 |
+
const char *bits = (sizeof(void*) == 4) ? "32" : "64";
|
| 174 |
+
char cmdlineBuf[32];
|
| 175 |
+
char pkgName[PATH_MAX];
|
| 176 |
+
int count;
|
| 177 |
+
int pid;
|
| 178 |
+
FILE *fp;
|
| 179 |
+
size_t bytesRead;
|
| 180 |
+
size_t pos;
|
| 181 |
+
|
| 182 |
+
pid = (int)getpid();
|
| 183 |
+
count = snprintf(cmdlineBuf, sizeof(cmdlineBuf), "/proc/%d/cmdline", pid);
|
| 184 |
+
if (count <= 0 || count >= (int)sizeof(cmdlineBuf))
|
| 185 |
+
{
|
| 186 |
+
NVTX_ERR("Path buffer too small for: /proc/%d/cmdline\n", pid);
|
| 187 |
+
return NVTX_ERR_INIT_ACCESS_LIBRARY;
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
fp = fopen(cmdlineBuf, "r");
|
| 191 |
+
if (!fp)
|
| 192 |
+
{
|
| 193 |
+
NVTX_ERR("File couldn't be opened: %s\n", cmdlineBuf);
|
| 194 |
+
return NVTX_ERR_INIT_ACCESS_LIBRARY;
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
bytesRead = fread(pkgName, 1, sizeof(pkgName) - 1, fp);
|
| 198 |
+
fclose(fp);
|
| 199 |
+
if (bytesRead == 0)
|
| 200 |
+
{
|
| 201 |
+
NVTX_ERR("Package name couldn't be read from file: %s\n", cmdlineBuf);
|
| 202 |
+
return NVTX_ERR_INIT_ACCESS_LIBRARY;
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
pkgName[bytesRead] = 0;
|
| 206 |
+
|
| 207 |
+
/* String can contain colon as a process separator. In this case the package name is before the colon. */
|
| 208 |
+
pos = 0;
|
| 209 |
+
while (pos < bytesRead && pkgName[pos] != ':' && pkgName[pos] != '\0')
|
| 210 |
+
{
|
| 211 |
+
++pos;
|
| 212 |
+
}
|
| 213 |
+
pkgName[pos] = 0;
|
| 214 |
+
|
| 215 |
+
count = snprintf(injectionLibraryPathBuf, NVTX_BUFSIZE, "/data/data/%s/files/libNvtxInjection%s.so", pkgName, bits);
|
| 216 |
+
if (count <= 0 || count >= NVTX_BUFSIZE)
|
| 217 |
+
{
|
| 218 |
+
NVTX_ERR("Path buffer too small for: /data/data/%s/files/libNvtxInjection%s.so\n", pkgName, bits);
|
| 219 |
+
return NVTX_ERR_INIT_ACCESS_LIBRARY;
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
/* On Android, verify path is accessible due to aggressive file access restrictions. */
|
| 223 |
+
/* For dlopen, if the filename contains a leading slash, then it is interpreted as a */
|
| 224 |
+
/* relative or absolute pathname; otherwise it will follow the rules in ld.so. */
|
| 225 |
+
if (injectionLibraryPathBuf[0] == '/')
|
| 226 |
+
{
|
| 227 |
+
#if (__ANDROID_API__ < 21)
|
| 228 |
+
int access_err = access(injectionLibraryPathBuf, F_OK | R_OK);
|
| 229 |
+
#else
|
| 230 |
+
int access_err = faccessat(AT_FDCWD, injectionLibraryPathBuf, F_OK | R_OK, 0);
|
| 231 |
+
#endif
|
| 232 |
+
if (access_err != 0)
|
| 233 |
+
{
|
| 234 |
+
NVTX_ERR("Injection library path wasn't accessible [code=%s] [path=%s]\n", strerror(errno), injectionLibraryPathBuf);
|
| 235 |
+
return NVTX_ERR_INIT_ACCESS_LIBRARY;
|
| 236 |
+
}
|
| 237 |
+
}
|
| 238 |
+
injectionLibraryPath = injectionLibraryPathBuf;
|
| 239 |
+
}
|
| 240 |
+
#endif
|
| 241 |
+
|
| 242 |
+
/* At this point, injectionLibraryPath is specified if a dynamic
|
| 243 |
+
* injection library was specified by a tool. */
|
| 244 |
+
if (injectionLibraryPath)
|
| 245 |
+
{
|
| 246 |
+
/* Load the injection library */
|
| 247 |
+
injectionLibraryHandle = NVTX_DLLOPEN(injectionLibraryPath);
|
| 248 |
+
if (!injectionLibraryHandle)
|
| 249 |
+
{
|
| 250 |
+
NVTX_ERR("Failed to load injection library\n");
|
| 251 |
+
return NVTX_ERR_INIT_LOAD_LIBRARY;
|
| 252 |
+
}
|
| 253 |
+
else
|
| 254 |
+
{
|
| 255 |
+
/* Attempt to get the injection library's entry-point */
|
| 256 |
+
init_fnptr = (NvtxInitializeInjectionNvtxFunc_t)NVTX_DLLFUNC(injectionLibraryHandle, initFuncName);
|
| 257 |
+
if (!init_fnptr)
|
| 258 |
+
{
|
| 259 |
+
NVTX_DLLCLOSE(injectionLibraryHandle);
|
| 260 |
+
NVTX_ERR("Failed to get address of function InitializeInjectionNvtx2 from injection library\n");
|
| 261 |
+
return NVTX_ERR_INIT_MISSING_LIBRARY_ENTRY_POINT;
|
| 262 |
+
}
|
| 263 |
+
}
|
| 264 |
+
}
|
| 265 |
+
}
|
| 266 |
+
#endif
|
| 267 |
+
|
| 268 |
+
#if NVTX_SUPPORT_STATIC_INJECTION_LIBRARY
|
| 269 |
+
if (!init_fnptr)
|
| 270 |
+
{
|
| 271 |
+
/* Check weakly-defined function pointer. A statically-linked injection can define this as
|
| 272 |
+
* a normal symbol and it will take precedence over a dynamic injection. */
|
| 273 |
+
if (InitializeInjectionNvtx2_fnptr)
|
| 274 |
+
{
|
| 275 |
+
init_fnptr = InitializeInjectionNvtx2_fnptr;
|
| 276 |
+
}
|
| 277 |
+
}
|
| 278 |
+
#endif
|
| 279 |
+
|
| 280 |
+
/* At this point, if init_fnptr is not set, then no tool has specified
|
| 281 |
+
* an NVTX injection library -- return non-success result so all NVTX
|
| 282 |
+
* API functions will be set to no-ops. */
|
| 283 |
+
if (!init_fnptr)
|
| 284 |
+
{
|
| 285 |
+
return NVTX_ERR_NO_INJECTION_LIBRARY_AVAILABLE;
|
| 286 |
+
}
|
| 287 |
+
|
| 288 |
+
/* Invoke injection library's initialization function. If it returns
|
| 289 |
+
* 0 (failure) and a dynamic injection was loaded, unload it. */
|
| 290 |
+
entryPointStatus = init_fnptr(NVTX_VERSIONED_IDENTIFIER(nvtxGetExportTable));
|
| 291 |
+
if (entryPointStatus == 0)
|
| 292 |
+
{
|
| 293 |
+
NVTX_ERR("Failed to initialize injection library -- initialization function returned 0\n");
|
| 294 |
+
if (injectionLibraryHandle)
|
| 295 |
+
{
|
| 296 |
+
NVTX_DLLCLOSE(injectionLibraryHandle);
|
| 297 |
+
}
|
| 298 |
+
return NVTX_ERR_INIT_FAILED_LIBRARY_ENTRY_POINT;
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
return NVTX_SUCCESS;
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)(void)
|
| 305 |
+
{
|
| 306 |
+
unsigned int old;
|
| 307 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).initState == NVTX_INIT_STATE_COMPLETE)
|
| 308 |
+
{
|
| 309 |
+
return;
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
NVTX_ATOMIC_CAS_32(
|
| 313 |
+
old,
|
| 314 |
+
&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).initState,
|
| 315 |
+
NVTX_INIT_STATE_STARTED,
|
| 316 |
+
NVTX_INIT_STATE_FRESH);
|
| 317 |
+
if (old == NVTX_INIT_STATE_FRESH)
|
| 318 |
+
{
|
| 319 |
+
int result;
|
| 320 |
+
int forceAllToNoops;
|
| 321 |
+
|
| 322 |
+
/* Load & initialize injection library -- it will assign the function pointers */
|
| 323 |
+
result = NVTX_VERSIONED_IDENTIFIER(nvtxInitializeInjectionLibrary)();
|
| 324 |
+
|
| 325 |
+
/* Set all pointers not assigned by the injection to null */
|
| 326 |
+
forceAllToNoops = result != NVTX_SUCCESS; /* Set all to null if injection init failed */
|
| 327 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxSetInitFunctionsToNoops)(forceAllToNoops);
|
| 328 |
+
|
| 329 |
+
/* Signal that initialization has finished, so now the assigned function pointers will be used */
|
| 330 |
+
NVTX_ATOMIC_WRITE_32(
|
| 331 |
+
&NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).initState,
|
| 332 |
+
NVTX_INIT_STATE_COMPLETE);
|
| 333 |
+
}
|
| 334 |
+
else /* Spin-wait until initialization has finished */
|
| 335 |
+
{
|
| 336 |
+
NVTX_MEMBAR();
|
| 337 |
+
while (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).initState != NVTX_INIT_STATE_COMPLETE)
|
| 338 |
+
{
|
| 339 |
+
NVTX_YIELD();
|
| 340 |
+
NVTX_MEMBAR();
|
| 341 |
+
}
|
| 342 |
+
}
|
| 343 |
+
}
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInitDecls.h
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef NVTX_IMPL_GUARD
|
| 2 |
+
#error Never include this file directly -- it is automatically included by nvToolsExt.h (except when NVTX_NO_IMPL is defined).
|
| 3 |
+
#endif
|
| 4 |
+
|
| 5 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkEx_impl_init)(const nvtxEventAttributes_t* eventAttrib);
|
| 6 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkA_impl_init)(const char* message);
|
| 7 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkW_impl_init)(const wchar_t* message);
|
| 8 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartEx_impl_init)(const nvtxEventAttributes_t* eventAttrib);
|
| 9 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartA_impl_init)(const char* message);
|
| 10 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartW_impl_init)(const wchar_t* message);
|
| 11 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeEnd_impl_init)(nvtxRangeId_t id);
|
| 12 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushEx_impl_init)(const nvtxEventAttributes_t* eventAttrib);
|
| 13 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushA_impl_init)(const char* message);
|
| 14 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushW_impl_init)(const wchar_t* message);
|
| 15 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePop_impl_init)(void);
|
| 16 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryA_impl_init)(uint32_t category, const char* name);
|
| 17 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryW_impl_init)(uint32_t category, const wchar_t* name);
|
| 18 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadA_impl_init)(uint32_t threadId, const char* name);
|
| 19 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadW_impl_init)(uint32_t threadId, const wchar_t* name);
|
| 20 |
+
|
| 21 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceA_impl_init)(nvtx_CUdevice device, const char* name);
|
| 22 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceW_impl_init)(nvtx_CUdevice device, const wchar_t* name);
|
| 23 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextA_impl_init)(nvtx_CUcontext context, const char* name);
|
| 24 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextW_impl_init)(nvtx_CUcontext context, const wchar_t* name);
|
| 25 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamA_impl_init)(nvtx_CUstream stream, const char* name);
|
| 26 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamW_impl_init)(nvtx_CUstream stream, const wchar_t* name);
|
| 27 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventA_impl_init)(nvtx_CUevent event, const char* name);
|
| 28 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventW_impl_init)(nvtx_CUevent event, const wchar_t* name);
|
| 29 |
+
|
| 30 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceA_impl_init)(nvtx_cl_device_id device, const char* name);
|
| 31 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceW_impl_init)(nvtx_cl_device_id device, const wchar_t* name);
|
| 32 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextA_impl_init)(nvtx_cl_context context, const char* name);
|
| 33 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextW_impl_init)(nvtx_cl_context context, const wchar_t* name);
|
| 34 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueA_impl_init)(nvtx_cl_command_queue command_queue, const char* name);
|
| 35 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueW_impl_init)(nvtx_cl_command_queue command_queue, const wchar_t* name);
|
| 36 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectA_impl_init)(nvtx_cl_mem memobj, const char* name);
|
| 37 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectW_impl_init)(nvtx_cl_mem memobj, const wchar_t* name);
|
| 38 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerA_impl_init)(nvtx_cl_sampler sampler, const char* name);
|
| 39 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerW_impl_init)(nvtx_cl_sampler sampler, const wchar_t* name);
|
| 40 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramA_impl_init)(nvtx_cl_program program, const char* name);
|
| 41 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramW_impl_init)(nvtx_cl_program program, const wchar_t* name);
|
| 42 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventA_impl_init)(nvtx_cl_event evnt, const char* name);
|
| 43 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventW_impl_init)(nvtx_cl_event evnt, const wchar_t* name);
|
| 44 |
+
|
| 45 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceA_impl_init)(int device, const char* name);
|
| 46 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceW_impl_init)(int device, const wchar_t* name);
|
| 47 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamA_impl_init)(nvtx_cudaStream_t stream, const char* name);
|
| 48 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamW_impl_init)(nvtx_cudaStream_t stream, const wchar_t* name);
|
| 49 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventA_impl_init)(nvtx_cudaEvent_t event, const char* name);
|
| 50 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventW_impl_init)(nvtx_cudaEvent_t event, const wchar_t* name);
|
| 51 |
+
|
| 52 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainMarkEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
|
| 53 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeStartEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
|
| 54 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeEnd_impl_init)(nvtxDomainHandle_t domain, nvtxRangeId_t id);
|
| 55 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePushEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
|
| 56 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePop_impl_init)(nvtxDomainHandle_t domain);
|
| 57 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION nvtxResourceHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceCreate_impl_init)(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs);
|
| 58 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceDestroy_impl_init)(nvtxResourceHandle_t resource);
|
| 59 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryA_impl_init)(nvtxDomainHandle_t domain, uint32_t category, const char* name);
|
| 60 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryW_impl_init)(nvtxDomainHandle_t domain, uint32_t category, const wchar_t* name);
|
| 61 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION nvtxStringHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringA_impl_init)(nvtxDomainHandle_t domain, const char* string);
|
| 62 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION nvtxStringHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringW_impl_init)(nvtxDomainHandle_t domain, const wchar_t* string);
|
| 63 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION nvtxDomainHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateA_impl_init)(const char* message);
|
| 64 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION nvtxDomainHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateW_impl_init)(const wchar_t* message);
|
| 65 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainDestroy_impl_init)(nvtxDomainHandle_t domain);
|
| 66 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxInitialize_impl_init)(const void* reserved);
|
| 67 |
+
|
| 68 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION nvtxSyncUser_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserCreate_impl_init)(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs);
|
| 69 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserDestroy_impl_init)(nvtxSyncUser_t handle);
|
| 70 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireStart_impl_init)(nvtxSyncUser_t handle);
|
| 71 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireFailed_impl_init)(nvtxSyncUser_t handle);
|
| 72 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireSuccess_impl_init)(nvtxSyncUser_t handle);
|
| 73 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserReleasing_impl_init)(nvtxSyncUser_t handle);
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxInitDefs.h
ADDED
|
@@ -0,0 +1,565 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef NVTX_IMPL_GUARD
|
| 2 |
+
#error Never include this file directly -- it is automatically included by nvToolsExt.h (except when NVTX_NO_IMPL is defined).
|
| 3 |
+
#endif
|
| 4 |
+
|
| 5 |
+
/* ---------------------------------------------------------------------------
 * First-call initializer stubs for the core NVTX API.
 *
 * The global dispatch table (nvtxGlobals) initially points every API slot at
 * one of these *_impl_init functions.  On first use, a stub runs
 * nvtxInitOnce() and then re-invokes the public entry point, which dispatches
 * through the table again.  NOTE(review): nvtxInitOnce() is assumed to
 * replace the table entries with the real (or no-op) implementations -- that
 * logic lives elsewhere (nvtxInit.h); confirm there.  These stubs must stay
 * token-identical to the shipped NVTX header: nvtxSetInitFunctionsToNoops()
 * compares table entries against these exact linkonce symbols.
 * ------------------------------------------------------------------------- */

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkEx_impl_init)(const nvtxEventAttributes_t* eventAttrib){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxMarkEx(eventAttrib); /* re-dispatch through the (now updated) table */
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkA_impl_init)(const char* message){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxMarkA(message);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxMarkW_impl_init)(const wchar_t* message){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxMarkW(message);
}

NVTX_LINKONCE_DEFINE_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartEx_impl_init)(const nvtxEventAttributes_t* eventAttrib){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxRangeStartEx(eventAttrib);
}

NVTX_LINKONCE_DEFINE_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartA_impl_init)(const char* message){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxRangeStartA(message);
}

NVTX_LINKONCE_DEFINE_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartW_impl_init)(const wchar_t* message){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxRangeStartW(message);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangeEnd_impl_init)(nvtxRangeId_t id){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxRangeEnd(id);
}

NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushEx_impl_init)(const nvtxEventAttributes_t* eventAttrib){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxRangePushEx(eventAttrib);
}

NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushA_impl_init)(const char* message){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxRangePushA(message);
}

NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePushW_impl_init)(const wchar_t* message){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxRangePushW(message);
}

NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxRangePop_impl_init)(void){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxRangePop();
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryA_impl_init)(uint32_t category, const char* name){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxNameCategoryA(category, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryW_impl_init)(uint32_t category, const wchar_t* name){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxNameCategoryW(category, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadA_impl_init)(uint32_t threadId, const char* name){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxNameOsThreadA(threadId, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadW_impl_init)(uint32_t threadId, const wchar_t* name){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxNameOsThreadW(threadId, name);
}
|
| 79 |
+
|
| 80 |
+
/* ---------------------------------------------------------------------------
 * First-call initializer stubs for the NVTX domain API.
 *
 * Same pattern as the core stubs above: run nvtxInitOnce(), then forward to
 * the public entry point, which re-dispatches through nvtxGlobals.  Functions
 * with a return value forward the callee's result unchanged.
 * ------------------------------------------------------------------------- */

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainMarkEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxDomainMarkEx(domain, eventAttrib);
}

NVTX_LINKONCE_DEFINE_FUNCTION nvtxRangeId_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeStartEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxDomainRangeStartEx(domain, eventAttrib);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeEnd_impl_init)(nvtxDomainHandle_t domain, nvtxRangeId_t id){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxDomainRangeEnd(domain, id);
}

NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePushEx_impl_init)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxDomainRangePushEx(domain, eventAttrib);
}

NVTX_LINKONCE_DEFINE_FUNCTION int NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePop_impl_init)(nvtxDomainHandle_t domain){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxDomainRangePop(domain);
}

NVTX_LINKONCE_DEFINE_FUNCTION nvtxResourceHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceCreate_impl_init)(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxDomainResourceCreate(domain, attribs);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceDestroy_impl_init)(nvtxResourceHandle_t resource){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxDomainResourceDestroy(resource);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryA_impl_init)(nvtxDomainHandle_t domain, uint32_t category, const char* name){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxDomainNameCategoryA(domain, category, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryW_impl_init)(nvtxDomainHandle_t domain, uint32_t category, const wchar_t* name){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxDomainNameCategoryW(domain, category, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION nvtxStringHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringA_impl_init)(nvtxDomainHandle_t domain, const char* string){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxDomainRegisterStringA(domain, string);
}

NVTX_LINKONCE_DEFINE_FUNCTION nvtxStringHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringW_impl_init)(nvtxDomainHandle_t domain, const wchar_t* string){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxDomainRegisterStringW(domain, string);
}

NVTX_LINKONCE_DEFINE_FUNCTION nvtxDomainHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateA_impl_init)(const char* message){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxDomainCreateA(message);
}

NVTX_LINKONCE_DEFINE_FUNCTION nvtxDomainHandle_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateW_impl_init)(const wchar_t* message){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    return nvtxDomainCreateW(message);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainDestroy_impl_init)(nvtxDomainHandle_t domain){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxDomainDestroy(domain);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxInitialize_impl_init)(const void* reserved){
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    nvtxInitialize(reserved);
}
|
| 154 |
+
|
| 155 |
+
/* ---------------------------------------------------------------------------
 * First-call initializer stubs for the CUDA-driver resource-naming API
 * (nvToolsExtCuda.h).
 *
 * Unlike the core stubs, these do not re-enter a public entry point after
 * init.  Instead they re-read the function pointer from nvtxGlobals after
 * nvtxInitOnce() and call it only if it is non-NULL -- if no tool installed
 * an implementation, the call is silently a no-op.  The pointer is copied to
 * a local before the NULL check so the test and the call use the same value.
 * ------------------------------------------------------------------------- */

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceA_impl_init)(nvtx_CUdevice device, const char* name){
    nvtxNameCuDeviceA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceA_impl_fnptr;
    if (local)
        local(device, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceW_impl_init)(nvtx_CUdevice device, const wchar_t* name){
    nvtxNameCuDeviceW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceW_impl_fnptr;
    if (local)
        local(device, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextA_impl_init)(nvtx_CUcontext context, const char* name){
    nvtxNameCuContextA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextA_impl_fnptr;
    if (local)
        local(context, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextW_impl_init)(nvtx_CUcontext context, const wchar_t* name){
    nvtxNameCuContextW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextW_impl_fnptr;
    if (local)
        local(context, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamA_impl_init)(nvtx_CUstream stream, const char* name){
    nvtxNameCuStreamA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamA_impl_fnptr;
    if (local)
        local(stream, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamW_impl_init)(nvtx_CUstream stream, const wchar_t* name){
    nvtxNameCuStreamW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamW_impl_fnptr;
    if (local)
        local(stream, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventA_impl_init)(nvtx_CUevent event, const char* name){
    nvtxNameCuEventA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventA_impl_fnptr;
    if (local)
        local(event, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventW_impl_init)(nvtx_CUevent event, const wchar_t* name){
    nvtxNameCuEventW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventW_impl_fnptr;
    if (local)
        local(event, name);
}
|
| 218 |
+
|
| 219 |
+
/* ---------------------------------------------------------------------------
 * First-call initializer stubs for the CUDA-runtime resource-naming API
 * (nvToolsExtCudaRt.h).  Same pointer-reload pattern as the CUDA-driver
 * stubs above.  Note the asymmetry in the local's type: the Device variants
 * use *_impl_fntype while Stream/Event use *_fakeimpl_fntype -- this mirrors
 * the upstream type declarations (Device takes a plain int, so no fake
 * handle type is needed) and must be preserved.
 * ------------------------------------------------------------------------- */

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceA_impl_init)(int device, const char* name){
    nvtxNameCudaDeviceA_impl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceA_impl_fnptr;
    if (local)
        local(device, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceW_impl_init)(int device, const wchar_t* name){
    nvtxNameCudaDeviceW_impl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceW_impl_fnptr;
    if (local)
        local(device, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamA_impl_init)(nvtx_cudaStream_t stream, const char* name){
    nvtxNameCudaStreamA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamA_impl_fnptr;
    if (local)
        local(stream, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamW_impl_init)(nvtx_cudaStream_t stream, const wchar_t* name){
    nvtxNameCudaStreamW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamW_impl_fnptr;
    if (local)
        local(stream, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventA_impl_init)(nvtx_cudaEvent_t event, const char* name){
    nvtxNameCudaEventA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventA_impl_fnptr;
    if (local)
        local(event, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventW_impl_init)(nvtx_cudaEvent_t event, const wchar_t* name){
    nvtxNameCudaEventW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventW_impl_fnptr;
    if (local)
        local(event, name);
}
|
| 266 |
+
|
| 267 |
+
/* ---------------------------------------------------------------------------
 * First-call initializer stubs for the OpenCL resource-naming API
 * (nvToolsExtOpenCL.h).  Same pointer-reload pattern: init once, re-read the
 * table slot, call only if a tool installed an implementation.
 * ------------------------------------------------------------------------- */

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceA_impl_init)(nvtx_cl_device_id device, const char* name){
    nvtxNameClDeviceA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceA_impl_fnptr;
    if (local)
        local(device, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceW_impl_init)(nvtx_cl_device_id device, const wchar_t* name){
    nvtxNameClDeviceW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceW_impl_fnptr;
    if (local)
        local(device, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextA_impl_init)(nvtx_cl_context context, const char* name){
    nvtxNameClContextA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextA_impl_fnptr;
    if (local)
        local(context, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextW_impl_init)(nvtx_cl_context context, const wchar_t* name){
    nvtxNameClContextW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextW_impl_fnptr;
    if (local)
        local(context, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueA_impl_init)(nvtx_cl_command_queue command_queue, const char* name){
    nvtxNameClCommandQueueA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueA_impl_fnptr;
    if (local)
        local(command_queue, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueW_impl_init)(nvtx_cl_command_queue command_queue, const wchar_t* name){
    nvtxNameClCommandQueueW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueW_impl_fnptr;
    if (local)
        local(command_queue, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectA_impl_init)(nvtx_cl_mem memobj, const char* name){
    nvtxNameClMemObjectA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectA_impl_fnptr;
    if (local)
        local(memobj, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectW_impl_init)(nvtx_cl_mem memobj, const wchar_t* name){
    nvtxNameClMemObjectW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectW_impl_fnptr;
    if (local)
        local(memobj, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerA_impl_init)(nvtx_cl_sampler sampler, const char* name){
    nvtxNameClSamplerA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerA_impl_fnptr;
    if (local)
        local(sampler, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerW_impl_init)(nvtx_cl_sampler sampler, const wchar_t* name){
    nvtxNameClSamplerW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerW_impl_fnptr;
    if (local)
        local(sampler, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramA_impl_init)(nvtx_cl_program program, const char* name){
    nvtxNameClProgramA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramA_impl_fnptr;
    if (local)
        local(program, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramW_impl_init)(nvtx_cl_program program, const wchar_t* name){
    nvtxNameClProgramW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramW_impl_fnptr;
    if (local)
        local(program, name);
}

/* "evnt" (not "event") matches the upstream parameter name -- likely avoids
 * clashing with OpenCL headers; do not rename. */
NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventA_impl_init)(nvtx_cl_event evnt, const char* name){
    nvtxNameClEventA_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventA_impl_fnptr;
    if (local)
        local(evnt, name);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventW_impl_init)(nvtx_cl_event evnt, const wchar_t* name){
    nvtxNameClEventW_fakeimpl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventW_impl_fnptr;
    if (local)
        local(evnt, name);
}
|
| 378 |
+
|
| 379 |
+
/* ---------------------------------------------------------------------------
 * First-call initializer stubs for the synchronization API
 * (nvToolsExtSync.h).  Pointer-reload pattern again.  The only stub in this
 * file that must produce a value with no implementation installed is
 * nvtxDomainSyncUserCreate: it returns a zero (NULL) handle, which callers
 * are expected to treat as "no sync object" -- NOTE(review): confirm against
 * the nvtxDomainSyncUserCreate contract in nvToolsExtSync.h.
 * ------------------------------------------------------------------------- */

NVTX_LINKONCE_DEFINE_FUNCTION nvtxSyncUser_t NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserCreate_impl_init)(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs){
    nvtxDomainSyncUserCreate_impl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserCreate_impl_fnptr;
    if (local) {
        return local(domain, attribs);
    }
    return (nvtxSyncUser_t)0; /* no tool attached: hand back a null handle */
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserDestroy_impl_init)(nvtxSyncUser_t handle){
    nvtxDomainSyncUserDestroy_impl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserDestroy_impl_fnptr;
    if (local)
        local(handle);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireStart_impl_init)(nvtxSyncUser_t handle){
    nvtxDomainSyncUserAcquireStart_impl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireStart_impl_fnptr;
    if (local)
        local(handle);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireFailed_impl_init)(nvtxSyncUser_t handle){
    nvtxDomainSyncUserAcquireFailed_impl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireFailed_impl_fnptr;
    if (local)
        local(handle);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireSuccess_impl_init)(nvtxSyncUser_t handle){
    nvtxDomainSyncUserAcquireSuccess_impl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireSuccess_impl_fnptr;
    if (local)
        local(handle);
}

NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_API NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserReleasing_impl_init)(nvtxSyncUser_t handle){
    nvtxDomainSyncUserReleasing_impl_fntype local;
    NVTX_VERSIONED_IDENTIFIER(nvtxInitOnce)();
    local = NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserReleasing_impl_fnptr;
    if (local)
        local(handle);
}
|
| 428 |
+
|
| 429 |
+
NVTX_LINKONCE_FWDDECL_FUNCTION void NVTX_VERSIONED_IDENTIFIER(nvtxSetInitFunctionsToNoops)(int forceAllToNoops);
|
| 430 |
+
NVTX_LINKONCE_DEFINE_FUNCTION void NVTX_VERSIONED_IDENTIFIER(nvtxSetInitFunctionsToNoops)(int forceAllToNoops)
|
| 431 |
+
{
|
| 432 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxMarkEx_impl_init) || forceAllToNoops)
|
| 433 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkEx_impl_fnptr = NULL;
|
| 434 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxMarkA_impl_init) || forceAllToNoops)
|
| 435 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkA_impl_fnptr = NULL;
|
| 436 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxMarkW_impl_init) || forceAllToNoops)
|
| 437 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxMarkW_impl_fnptr = NULL;
|
| 438 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartEx_impl_init) || forceAllToNoops)
|
| 439 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartEx_impl_fnptr = NULL;
|
| 440 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartA_impl_init) || forceAllToNoops)
|
| 441 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartA_impl_fnptr = NULL;
|
| 442 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangeStartW_impl_init) || forceAllToNoops)
|
| 443 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeStartW_impl_fnptr = NULL;
|
| 444 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeEnd_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangeEnd_impl_init) || forceAllToNoops)
|
| 445 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangeEnd_impl_fnptr = NULL;
|
| 446 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangePushEx_impl_init) || forceAllToNoops)
|
| 447 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushEx_impl_fnptr = NULL;
|
| 448 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangePushA_impl_init) || forceAllToNoops)
|
| 449 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushA_impl_fnptr = NULL;
|
| 450 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangePushW_impl_init) || forceAllToNoops)
|
| 451 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePushW_impl_fnptr = NULL;
|
| 452 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePop_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxRangePop_impl_init) || forceAllToNoops)
|
| 453 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxRangePop_impl_fnptr = NULL;
|
| 454 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryA_impl_init) || forceAllToNoops)
|
| 455 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryA_impl_fnptr = NULL;
|
| 456 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCategoryW_impl_init) || forceAllToNoops)
|
| 457 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCategoryW_impl_fnptr = NULL;
|
| 458 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadA_impl_init) || forceAllToNoops)
|
| 459 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadA_impl_fnptr = NULL;
|
| 460 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameOsThreadW_impl_init) || forceAllToNoops)
|
| 461 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameOsThreadW_impl_fnptr = NULL;
|
| 462 |
+
|
| 463 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceA_impl_init) || forceAllToNoops)
|
| 464 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceA_impl_fnptr = NULL;
|
| 465 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuDeviceW_impl_init) || forceAllToNoops)
|
| 466 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuDeviceW_impl_fnptr = NULL;
|
| 467 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextA_impl_init) || forceAllToNoops)
|
| 468 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextA_impl_fnptr = NULL;
|
| 469 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuContextW_impl_init) || forceAllToNoops)
|
| 470 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuContextW_impl_fnptr = NULL;
|
| 471 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamA_impl_init) || forceAllToNoops)
|
| 472 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamA_impl_fnptr = NULL;
|
| 473 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuStreamW_impl_init) || forceAllToNoops)
|
| 474 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuStreamW_impl_fnptr = NULL;
|
| 475 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventA_impl_init) || forceAllToNoops)
|
| 476 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventA_impl_fnptr = NULL;
|
| 477 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCuEventW_impl_init) || forceAllToNoops)
|
| 478 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCuEventW_impl_fnptr = NULL;
|
| 479 |
+
|
| 480 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceA_impl_init) || forceAllToNoops)
|
| 481 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceA_impl_fnptr = NULL;
|
| 482 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClDeviceW_impl_init) || forceAllToNoops)
|
| 483 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClDeviceW_impl_fnptr = NULL;
|
| 484 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextA_impl_init) || forceAllToNoops)
|
| 485 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextA_impl_fnptr = NULL;
|
| 486 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClContextW_impl_init) || forceAllToNoops)
|
| 487 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClContextW_impl_fnptr = NULL;
|
| 488 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueA_impl_init) || forceAllToNoops)
|
| 489 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueA_impl_fnptr = NULL;
|
| 490 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClCommandQueueW_impl_init) || forceAllToNoops)
|
| 491 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClCommandQueueW_impl_fnptr = NULL;
|
| 492 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectA_impl_init) || forceAllToNoops)
|
| 493 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectA_impl_fnptr = NULL;
|
| 494 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClMemObjectW_impl_init) || forceAllToNoops)
|
| 495 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClMemObjectW_impl_fnptr = NULL;
|
| 496 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerA_impl_init) || forceAllToNoops)
|
| 497 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerA_impl_fnptr = NULL;
|
| 498 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClSamplerW_impl_init) || forceAllToNoops)
|
| 499 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClSamplerW_impl_fnptr = NULL;
|
| 500 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramA_impl_init) || forceAllToNoops)
|
| 501 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramA_impl_fnptr = NULL;
|
| 502 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClProgramW_impl_init) || forceAllToNoops)
|
| 503 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClProgramW_impl_fnptr = NULL;
|
| 504 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventA_impl_init) || forceAllToNoops)
|
| 505 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventA_impl_fnptr = NULL;
|
| 506 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameClEventW_impl_init) || forceAllToNoops)
|
| 507 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameClEventW_impl_fnptr = NULL;
|
| 508 |
+
|
| 509 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceA_impl_init) || forceAllToNoops)
|
| 510 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceA_impl_fnptr = NULL;
|
| 511 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaDeviceW_impl_init) || forceAllToNoops)
|
| 512 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaDeviceW_impl_fnptr = NULL;
|
| 513 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamA_impl_init) || forceAllToNoops)
|
| 514 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamA_impl_fnptr = NULL;
|
| 515 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaStreamW_impl_init) || forceAllToNoops)
|
| 516 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaStreamW_impl_fnptr = NULL;
|
| 517 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventA_impl_init) || forceAllToNoops)
|
| 518 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventA_impl_fnptr = NULL;
|
| 519 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxNameCudaEventW_impl_init) || forceAllToNoops)
|
| 520 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxNameCudaEventW_impl_fnptr = NULL;
|
| 521 |
+
|
| 522 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainMarkEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainMarkEx_impl_init) || forceAllToNoops)
|
| 523 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainMarkEx_impl_fnptr = NULL;
|
| 524 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeStartEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeStartEx_impl_init) || forceAllToNoops)
|
| 525 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeStartEx_impl_fnptr = NULL;
|
| 526 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeEnd_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangeEnd_impl_init) || forceAllToNoops)
|
| 527 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangeEnd_impl_fnptr = NULL;
|
| 528 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePushEx_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePushEx_impl_init) || forceAllToNoops)
|
| 529 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePushEx_impl_fnptr = NULL;
|
| 530 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePop_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRangePop_impl_init) || forceAllToNoops)
|
| 531 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRangePop_impl_fnptr = NULL;
|
| 532 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceCreate_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceCreate_impl_init) || forceAllToNoops)
|
| 533 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceCreate_impl_fnptr = NULL;
|
| 534 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceDestroy_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainResourceDestroy_impl_init) || forceAllToNoops)
|
| 535 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainResourceDestroy_impl_fnptr = NULL;
|
| 536 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryA_impl_init) || forceAllToNoops)
|
| 537 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryA_impl_fnptr = NULL;
|
| 538 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainNameCategoryW_impl_init) || forceAllToNoops)
|
| 539 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainNameCategoryW_impl_fnptr = NULL;
|
| 540 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringA_impl_init) || forceAllToNoops)
|
| 541 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringA_impl_fnptr = NULL;
|
| 542 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainRegisterStringW_impl_init) || forceAllToNoops)
|
| 543 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainRegisterStringW_impl_fnptr = NULL;
|
| 544 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateA_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateA_impl_init) || forceAllToNoops)
|
| 545 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateA_impl_fnptr = NULL;
|
| 546 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateW_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainCreateW_impl_init) || forceAllToNoops)
|
| 547 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainCreateW_impl_fnptr = NULL;
|
| 548 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainDestroy_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainDestroy_impl_init) || forceAllToNoops)
|
| 549 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainDestroy_impl_fnptr = NULL;
|
| 550 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxInitialize_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxInitialize_impl_init) || forceAllToNoops)
|
| 551 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxInitialize_impl_fnptr = NULL;
|
| 552 |
+
|
| 553 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserCreate_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserCreate_impl_init) || forceAllToNoops)
|
| 554 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserCreate_impl_fnptr = NULL;
|
| 555 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserDestroy_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserDestroy_impl_init) || forceAllToNoops)
|
| 556 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserDestroy_impl_fnptr = NULL;
|
| 557 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireStart_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireStart_impl_init) || forceAllToNoops)
|
| 558 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireStart_impl_fnptr = NULL;
|
| 559 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireFailed_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireFailed_impl_init) || forceAllToNoops)
|
| 560 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireFailed_impl_fnptr = NULL;
|
| 561 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireSuccess_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserAcquireSuccess_impl_init) || forceAllToNoops)
|
| 562 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserAcquireSuccess_impl_fnptr = NULL;
|
| 563 |
+
if (NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserReleasing_impl_fnptr == NVTX_VERSIONED_IDENTIFIER(nvtxDomainSyncUserReleasing_impl_init) || forceAllToNoops)
|
| 564 |
+
NVTX_VERSIONED_IDENTIFIER(nvtxGlobals).nvtxDomainSyncUserReleasing_impl_fnptr = NULL;
|
| 565 |
+
}
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxLinkOnce.h
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef __NVTX_LINKONCE_H__
|
| 2 |
+
#define __NVTX_LINKONCE_H__
|
| 3 |
+
|
| 4 |
+
/* This header defines macros to permit making definitions of global variables
|
| 5 |
+
* and functions in C/C++ header files which may be included multiple times in
|
| 6 |
+
* a translation unit or linkage unit. It allows authoring header-only libraries
|
| 7 |
+
* which can be used by multiple other header-only libraries (either as the same
|
| 8 |
+
* copy or multiple copies), and does not require any build changes, such as
|
| 9 |
+
* adding another .c file, linking a static library, or deploying a dynamic
|
| 10 |
+
* library. Globals defined with these macros have the property that they have
|
| 11 |
+
* the same address, pointing to a single instance, for the entire linkage unit.
|
| 12 |
+
* It is expected but not guaranteed that each linkage unit will have a separate
|
| 13 |
+
* instance.
|
| 14 |
+
*
|
| 15 |
+
* In some situations it is desirable to declare a variable without initializing
|
| 16 |
+
* it, refer to it in code or other variables' initializers, and then initialize
|
| 17 |
+
* it later. Similarly, functions can be prototyped, have their address taken,
|
| 18 |
+
* and then have their body defined later. In such cases, use the FWDDECL macros
|
| 19 |
+
* when forward-declaring LINKONCE global variables without initializers and
|
| 20 |
+
* function prototypes, and then use the DEFINE macros when later defining them.
|
| 21 |
+
* Although in many cases the FWDDECL macro is equivalent to the DEFINE macro,
|
| 22 |
+
* following this pattern makes code maximally portable.
|
| 23 |
+
*/
|
| 24 |
+
|
| 25 |
+
#if defined(__MINGW32__) /* MinGW */
|
| 26 |
+
#define NVTX_LINKONCE_WEAK __attribute__((section(".gnu.linkonce.0.")))
|
| 27 |
+
#if defined(__cplusplus)
|
| 28 |
+
#define NVTX_LINKONCE_DEFINE_GLOBAL __declspec(selectany)
|
| 29 |
+
#define NVTX_LINKONCE_DEFINE_FUNCTION extern "C" inline NVTX_LINKONCE_WEAK
|
| 30 |
+
#else
|
| 31 |
+
#define NVTX_LINKONCE_DEFINE_GLOBAL __declspec(selectany)
|
| 32 |
+
#define NVTX_LINKONCE_DEFINE_FUNCTION NVTX_LINKONCE_WEAK
|
| 33 |
+
#endif
|
| 34 |
+
#elif defined(_MSC_VER) /* MSVC */
|
| 35 |
+
#if defined(__cplusplus)
|
| 36 |
+
#define NVTX_LINKONCE_DEFINE_GLOBAL extern "C" __declspec(selectany)
|
| 37 |
+
#define NVTX_LINKONCE_DEFINE_FUNCTION extern "C" inline
|
| 38 |
+
#else
|
| 39 |
+
#define NVTX_LINKONCE_DEFINE_GLOBAL __declspec(selectany)
|
| 40 |
+
#define NVTX_LINKONCE_DEFINE_FUNCTION __inline
|
| 41 |
+
#endif
|
| 42 |
+
#elif defined(__CYGWIN__) && defined(__clang__) /* Clang on Cygwin */
|
| 43 |
+
#define NVTX_LINKONCE_WEAK __attribute__((section(".gnu.linkonce.0.")))
|
| 44 |
+
#if defined(__cplusplus)
|
| 45 |
+
#define NVTX_LINKONCE_DEFINE_GLOBAL NVTX_LINKONCE_WEAK
|
| 46 |
+
#define NVTX_LINKONCE_DEFINE_FUNCTION extern "C" NVTX_LINKONCE_WEAK
|
| 47 |
+
#else
|
| 48 |
+
#define NVTX_LINKONCE_DEFINE_GLOBAL NVTX_LINKONCE_WEAK
|
| 49 |
+
#define NVTX_LINKONCE_DEFINE_FUNCTION NVTX_LINKONCE_WEAK
|
| 50 |
+
#endif
|
| 51 |
+
#elif defined(__CYGWIN__) /* Assume GCC or compatible */
|
| 52 |
+
#define NVTX_LINKONCE_WEAK __attribute__((weak))
|
| 53 |
+
#if defined(__cplusplus)
|
| 54 |
+
#define NVTX_LINKONCE_DEFINE_GLOBAL __declspec(selectany)
|
| 55 |
+
#define NVTX_LINKONCE_DEFINE_FUNCTION extern "C" inline
|
| 56 |
+
#else
|
| 57 |
+
#define NVTX_LINKONCE_DEFINE_GLOBAL NVTX_LINKONCE_WEAK
|
| 58 |
+
#define NVTX_LINKONCE_DEFINE_FUNCTION NVTX_LINKONCE_WEAK
|
| 59 |
+
#endif
|
| 60 |
+
#else /* All others: Assume GCC, clang, or compatible */
|
| 61 |
+
#define NVTX_LINKONCE_WEAK __attribute__((weak))
|
| 62 |
+
#define NVTX_LINKONCE_HIDDEN __attribute__((visibility("hidden")))
|
| 63 |
+
#if defined(__cplusplus)
|
| 64 |
+
#define NVTX_LINKONCE_DEFINE_GLOBAL NVTX_LINKONCE_HIDDEN NVTX_LINKONCE_WEAK
|
| 65 |
+
#define NVTX_LINKONCE_DEFINE_FUNCTION extern "C" NVTX_LINKONCE_HIDDEN inline
|
| 66 |
+
#else
|
| 67 |
+
#define NVTX_LINKONCE_DEFINE_GLOBAL NVTX_LINKONCE_HIDDEN NVTX_LINKONCE_WEAK
|
| 68 |
+
#define NVTX_LINKONCE_DEFINE_FUNCTION NVTX_LINKONCE_HIDDEN NVTX_LINKONCE_WEAK
|
| 69 |
+
#endif
|
| 70 |
+
#endif
|
| 71 |
+
|
| 72 |
+
#define NVTX_LINKONCE_FWDDECL_GLOBAL NVTX_LINKONCE_DEFINE_GLOBAL extern
|
| 73 |
+
#define NVTX_LINKONCE_FWDDECL_FUNCTION NVTX_LINKONCE_DEFINE_FUNCTION
|
| 74 |
+
|
| 75 |
+
#endif /* __NVTX_LINKONCE_H__ */
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/nvtx3/nvtxDetail/nvtxTypes.h
ADDED
|
@@ -0,0 +1,333 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2009-2016 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO USER:
|
| 5 |
+
*
|
| 6 |
+
* This source code is subject to NVIDIA ownership rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* This software and the information contained herein is PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions
|
| 11 |
+
* of a form of NVIDIA software license agreement.
|
| 12 |
+
*
|
| 13 |
+
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
|
| 14 |
+
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
|
| 15 |
+
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
|
| 16 |
+
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
|
| 17 |
+
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 18 |
+
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
|
| 19 |
+
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
| 20 |
+
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
|
| 21 |
+
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
|
| 22 |
+
* OR PERFORMANCE OF THIS SOURCE CODE.
|
| 23 |
+
*
|
| 24 |
+
* U.S. Government End Users. This source code is a "commercial item" as
|
| 25 |
+
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
|
| 26 |
+
* "commercial computer software" and "commercial computer software
|
| 27 |
+
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
|
| 28 |
+
* and is provided to the U.S. Government only as a commercial end item.
|
| 29 |
+
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
|
| 30 |
+
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
|
| 31 |
+
* source code with only those rights set forth herein.
|
| 32 |
+
*
|
| 33 |
+
* Any use of this source code in individual and commercial software must
|
| 34 |
+
* include, in the user documentation and internal comments to the code,
|
| 35 |
+
* the above Disclaimer and U.S. Government End Users Notice.
|
| 36 |
+
*/
|
| 37 |
+
|
| 38 |
+
/* This header defines types which are used by the internal implementation
|
| 39 |
+
* of NVTX and callback subscribers. API clients do not use these types,
|
| 40 |
+
* so they are defined here instead of in nvToolsExt.h to clarify they are
|
| 41 |
+
* not part of the NVTX client API. */
|
| 42 |
+
|
| 43 |
+
#ifndef NVTX_IMPL_GUARD
|
| 44 |
+
#error Never include this file directly -- it is automatically included by nvToolsExt.h.
|
| 45 |
+
#endif
|
| 46 |
+
|
| 47 |
+
/* ------ Dependency-free types binary-compatible with real types ------- */
|
| 48 |
+
|
| 49 |
+
/* In order to avoid having the NVTX core API headers depend on non-NVTX
|
| 50 |
+
* headers like cuda.h, NVTX defines binary-compatible types to use for
|
| 51 |
+
* safely making the initialization versions of all NVTX functions without
|
| 52 |
+
* needing to have definitions for the real types. */
|
| 53 |
+
|
| 54 |
+
typedef int nvtx_CUdevice;
|
| 55 |
+
typedef void* nvtx_CUcontext;
|
| 56 |
+
typedef void* nvtx_CUstream;
|
| 57 |
+
typedef void* nvtx_CUevent;
|
| 58 |
+
|
| 59 |
+
typedef void* nvtx_cudaStream_t;
|
| 60 |
+
typedef void* nvtx_cudaEvent_t;
|
| 61 |
+
|
| 62 |
+
typedef void* nvtx_cl_platform_id;
|
| 63 |
+
typedef void* nvtx_cl_device_id;
|
| 64 |
+
typedef void* nvtx_cl_context;
|
| 65 |
+
typedef void* nvtx_cl_command_queue;
|
| 66 |
+
typedef void* nvtx_cl_mem;
|
| 67 |
+
typedef void* nvtx_cl_program;
|
| 68 |
+
typedef void* nvtx_cl_kernel;
|
| 69 |
+
typedef void* nvtx_cl_event;
|
| 70 |
+
typedef void* nvtx_cl_sampler;
|
| 71 |
+
|
| 72 |
+
typedef struct nvtxSyncUser* nvtxSyncUser_t;
|
| 73 |
+
struct nvtxSyncUserAttributes_v0;
|
| 74 |
+
typedef struct nvtxSyncUserAttributes_v0 nvtxSyncUserAttributes_t;
|
| 75 |
+
|
| 76 |
+
/* --------- Types for function pointers (with fake API types) ---------- */
|
| 77 |
+
|
| 78 |
+
typedef void (NVTX_API * nvtxMarkEx_impl_fntype)(const nvtxEventAttributes_t* eventAttrib);
|
| 79 |
+
typedef void (NVTX_API * nvtxMarkA_impl_fntype)(const char* message);
|
| 80 |
+
typedef void (NVTX_API * nvtxMarkW_impl_fntype)(const wchar_t* message);
|
| 81 |
+
typedef nvtxRangeId_t (NVTX_API * nvtxRangeStartEx_impl_fntype)(const nvtxEventAttributes_t* eventAttrib);
|
| 82 |
+
typedef nvtxRangeId_t (NVTX_API * nvtxRangeStartA_impl_fntype)(const char* message);
|
| 83 |
+
typedef nvtxRangeId_t (NVTX_API * nvtxRangeStartW_impl_fntype)(const wchar_t* message);
|
| 84 |
+
typedef void (NVTX_API * nvtxRangeEnd_impl_fntype)(nvtxRangeId_t id);
|
| 85 |
+
typedef int (NVTX_API * nvtxRangePushEx_impl_fntype)(const nvtxEventAttributes_t* eventAttrib);
|
| 86 |
+
typedef int (NVTX_API * nvtxRangePushA_impl_fntype)(const char* message);
|
| 87 |
+
typedef int (NVTX_API * nvtxRangePushW_impl_fntype)(const wchar_t* message);
|
| 88 |
+
typedef int (NVTX_API * nvtxRangePop_impl_fntype)(void);
|
| 89 |
+
typedef void (NVTX_API * nvtxNameCategoryA_impl_fntype)(uint32_t category, const char* name);
|
| 90 |
+
typedef void (NVTX_API * nvtxNameCategoryW_impl_fntype)(uint32_t category, const wchar_t* name);
|
| 91 |
+
typedef void (NVTX_API * nvtxNameOsThreadA_impl_fntype)(uint32_t threadId, const char* name);
|
| 92 |
+
typedef void (NVTX_API * nvtxNameOsThreadW_impl_fntype)(uint32_t threadId, const wchar_t* name);
|
| 93 |
+
|
| 94 |
+
/* Real impl types are defined in nvtxImplCuda_v3.h, where CUDA headers are included */
|
| 95 |
+
typedef void (NVTX_API * nvtxNameCuDeviceA_fakeimpl_fntype)(nvtx_CUdevice device, const char* name);
|
| 96 |
+
typedef void (NVTX_API * nvtxNameCuDeviceW_fakeimpl_fntype)(nvtx_CUdevice device, const wchar_t* name);
|
| 97 |
+
typedef void (NVTX_API * nvtxNameCuContextA_fakeimpl_fntype)(nvtx_CUcontext context, const char* name);
|
| 98 |
+
typedef void (NVTX_API * nvtxNameCuContextW_fakeimpl_fntype)(nvtx_CUcontext context, const wchar_t* name);
|
| 99 |
+
typedef void (NVTX_API * nvtxNameCuStreamA_fakeimpl_fntype)(nvtx_CUstream stream, const char* name);
|
| 100 |
+
typedef void (NVTX_API * nvtxNameCuStreamW_fakeimpl_fntype)(nvtx_CUstream stream, const wchar_t* name);
|
| 101 |
+
typedef void (NVTX_API * nvtxNameCuEventA_fakeimpl_fntype)(nvtx_CUevent event, const char* name);
|
| 102 |
+
typedef void (NVTX_API * nvtxNameCuEventW_fakeimpl_fntype)(nvtx_CUevent event, const wchar_t* name);
|
| 103 |
+
|
| 104 |
+
/* Real impl types are defined in nvtxImplOpenCL_v3.h, where OPENCL headers are included */
|
| 105 |
+
typedef void (NVTX_API * nvtxNameClDeviceA_fakeimpl_fntype)(nvtx_cl_device_id device, const char* name);
|
| 106 |
+
typedef void (NVTX_API * nvtxNameClDeviceW_fakeimpl_fntype)(nvtx_cl_device_id device, const wchar_t* name);
|
| 107 |
+
typedef void (NVTX_API * nvtxNameClContextA_fakeimpl_fntype)(nvtx_cl_context context, const char* name);
|
| 108 |
+
typedef void (NVTX_API * nvtxNameClContextW_fakeimpl_fntype)(nvtx_cl_context context, const wchar_t* name);
|
| 109 |
+
typedef void (NVTX_API * nvtxNameClCommandQueueA_fakeimpl_fntype)(nvtx_cl_command_queue command_queue, const char* name);
|
| 110 |
+
typedef void (NVTX_API * nvtxNameClCommandQueueW_fakeimpl_fntype)(nvtx_cl_command_queue command_queue, const wchar_t* name);
|
| 111 |
+
typedef void (NVTX_API * nvtxNameClMemObjectA_fakeimpl_fntype)(nvtx_cl_mem memobj, const char* name);
|
| 112 |
+
typedef void (NVTX_API * nvtxNameClMemObjectW_fakeimpl_fntype)(nvtx_cl_mem memobj, const wchar_t* name);
|
| 113 |
+
typedef void (NVTX_API * nvtxNameClSamplerA_fakeimpl_fntype)(nvtx_cl_sampler sampler, const char* name);
|
| 114 |
+
typedef void (NVTX_API * nvtxNameClSamplerW_fakeimpl_fntype)(nvtx_cl_sampler sampler, const wchar_t* name);
|
| 115 |
+
typedef void (NVTX_API * nvtxNameClProgramA_fakeimpl_fntype)(nvtx_cl_program program, const char* name);
|
| 116 |
+
typedef void (NVTX_API * nvtxNameClProgramW_fakeimpl_fntype)(nvtx_cl_program program, const wchar_t* name);
|
| 117 |
+
typedef void (NVTX_API * nvtxNameClEventA_fakeimpl_fntype)(nvtx_cl_event evnt, const char* name);
|
| 118 |
+
typedef void (NVTX_API * nvtxNameClEventW_fakeimpl_fntype)(nvtx_cl_event evnt, const wchar_t* name);
|
| 119 |
+
|
| 120 |
+
/* Real impl types are defined in nvtxImplCudaRt_v3.h, where CUDART headers are included */
|
| 121 |
+
typedef void (NVTX_API * nvtxNameCudaDeviceA_impl_fntype)(int device, const char* name);
|
| 122 |
+
typedef void (NVTX_API * nvtxNameCudaDeviceW_impl_fntype)(int device, const wchar_t* name);
|
| 123 |
+
typedef void (NVTX_API * nvtxNameCudaStreamA_fakeimpl_fntype)(nvtx_cudaStream_t stream, const char* name);
|
| 124 |
+
typedef void (NVTX_API * nvtxNameCudaStreamW_fakeimpl_fntype)(nvtx_cudaStream_t stream, const wchar_t* name);
|
| 125 |
+
typedef void (NVTX_API * nvtxNameCudaEventA_fakeimpl_fntype)(nvtx_cudaEvent_t event, const char* name);
|
| 126 |
+
typedef void (NVTX_API * nvtxNameCudaEventW_fakeimpl_fntype)(nvtx_cudaEvent_t event, const wchar_t* name);
|
| 127 |
+
|
| 128 |
+
typedef void (NVTX_API * nvtxDomainMarkEx_impl_fntype)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
|
| 129 |
+
typedef nvtxRangeId_t (NVTX_API * nvtxDomainRangeStartEx_impl_fntype)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
|
| 130 |
+
typedef void (NVTX_API * nvtxDomainRangeEnd_impl_fntype)(nvtxDomainHandle_t domain, nvtxRangeId_t id);
|
| 131 |
+
typedef int (NVTX_API * nvtxDomainRangePushEx_impl_fntype)(nvtxDomainHandle_t domain, const nvtxEventAttributes_t* eventAttrib);
|
| 132 |
+
typedef int (NVTX_API * nvtxDomainRangePop_impl_fntype)(nvtxDomainHandle_t domain);
|
| 133 |
+
typedef nvtxResourceHandle_t (NVTX_API * nvtxDomainResourceCreate_impl_fntype)(nvtxDomainHandle_t domain, nvtxResourceAttributes_t* attribs);
|
| 134 |
+
typedef void (NVTX_API * nvtxDomainResourceDestroy_impl_fntype)(nvtxResourceHandle_t resource);
|
| 135 |
+
typedef void (NVTX_API * nvtxDomainNameCategoryA_impl_fntype)(nvtxDomainHandle_t domain, uint32_t category, const char* name);
|
| 136 |
+
typedef void (NVTX_API * nvtxDomainNameCategoryW_impl_fntype)(nvtxDomainHandle_t domain, uint32_t category, const wchar_t* name);
|
| 137 |
+
typedef nvtxStringHandle_t (NVTX_API * nvtxDomainRegisterStringA_impl_fntype)(nvtxDomainHandle_t domain, const char* string);
|
| 138 |
+
typedef nvtxStringHandle_t (NVTX_API * nvtxDomainRegisterStringW_impl_fntype)(nvtxDomainHandle_t domain, const wchar_t* string);
|
| 139 |
+
typedef nvtxDomainHandle_t (NVTX_API * nvtxDomainCreateA_impl_fntype)(const char* message);
|
| 140 |
+
typedef nvtxDomainHandle_t (NVTX_API * nvtxDomainCreateW_impl_fntype)(const wchar_t* message);
|
| 141 |
+
typedef void (NVTX_API * nvtxDomainDestroy_impl_fntype)(nvtxDomainHandle_t domain);
|
| 142 |
+
typedef void (NVTX_API * nvtxInitialize_impl_fntype)(const void* reserved);
|
| 143 |
+
|
| 144 |
+
typedef nvtxSyncUser_t (NVTX_API * nvtxDomainSyncUserCreate_impl_fntype)(nvtxDomainHandle_t domain, const nvtxSyncUserAttributes_t* attribs);
|
| 145 |
+
typedef void (NVTX_API * nvtxDomainSyncUserDestroy_impl_fntype)(nvtxSyncUser_t handle);
|
| 146 |
+
typedef void (NVTX_API * nvtxDomainSyncUserAcquireStart_impl_fntype)(nvtxSyncUser_t handle);
|
| 147 |
+
typedef void (NVTX_API * nvtxDomainSyncUserAcquireFailed_impl_fntype)(nvtxSyncUser_t handle);
|
| 148 |
+
typedef void (NVTX_API * nvtxDomainSyncUserAcquireSuccess_impl_fntype)(nvtxSyncUser_t handle);
|
| 149 |
+
typedef void (NVTX_API * nvtxDomainSyncUserReleasing_impl_fntype)(nvtxSyncUser_t handle);
|
| 150 |
+
|
| 151 |
+
/* ---------------- Types for callback subscription --------------------- */
|
| 152 |
+
|
| 153 |
+
typedef const void *(NVTX_API * NvtxGetExportTableFunc_t)(uint32_t exportTableId);
|
| 154 |
+
typedef int (NVTX_API * NvtxInitializeInjectionNvtxFunc_t)(NvtxGetExportTableFunc_t exportTable);
|
| 155 |
+
|
| 156 |
+
typedef enum NvtxCallbackModule
|
| 157 |
+
{
|
| 158 |
+
NVTX_CB_MODULE_INVALID = 0,
|
| 159 |
+
NVTX_CB_MODULE_CORE = 1,
|
| 160 |
+
NVTX_CB_MODULE_CUDA = 2,
|
| 161 |
+
NVTX_CB_MODULE_OPENCL = 3,
|
| 162 |
+
NVTX_CB_MODULE_CUDART = 4,
|
| 163 |
+
NVTX_CB_MODULE_CORE2 = 5,
|
| 164 |
+
NVTX_CB_MODULE_SYNC = 6,
|
| 165 |
+
/* --- New constants must only be added directly above this line --- */
|
| 166 |
+
NVTX_CB_MODULE_SIZE,
|
| 167 |
+
NVTX_CB_MODULE_FORCE_INT = 0x7fffffff
|
| 168 |
+
} NvtxCallbackModule;
|
| 169 |
+
|
| 170 |
+
typedef enum NvtxCallbackIdCore
|
| 171 |
+
{
|
| 172 |
+
NVTX_CBID_CORE_INVALID = 0,
|
| 173 |
+
NVTX_CBID_CORE_MarkEx = 1,
|
| 174 |
+
NVTX_CBID_CORE_MarkA = 2,
|
| 175 |
+
NVTX_CBID_CORE_MarkW = 3,
|
| 176 |
+
NVTX_CBID_CORE_RangeStartEx = 4,
|
| 177 |
+
NVTX_CBID_CORE_RangeStartA = 5,
|
| 178 |
+
NVTX_CBID_CORE_RangeStartW = 6,
|
| 179 |
+
NVTX_CBID_CORE_RangeEnd = 7,
|
| 180 |
+
NVTX_CBID_CORE_RangePushEx = 8,
|
| 181 |
+
NVTX_CBID_CORE_RangePushA = 9,
|
| 182 |
+
NVTX_CBID_CORE_RangePushW = 10,
|
| 183 |
+
NVTX_CBID_CORE_RangePop = 11,
|
| 184 |
+
NVTX_CBID_CORE_NameCategoryA = 12,
|
| 185 |
+
NVTX_CBID_CORE_NameCategoryW = 13,
|
| 186 |
+
NVTX_CBID_CORE_NameOsThreadA = 14,
|
| 187 |
+
NVTX_CBID_CORE_NameOsThreadW = 15,
|
| 188 |
+
/* --- New constants must only be added directly above this line --- */
|
| 189 |
+
NVTX_CBID_CORE_SIZE,
|
| 190 |
+
NVTX_CBID_CORE_FORCE_INT = 0x7fffffff
|
| 191 |
+
} NvtxCallbackIdCore;
|
| 192 |
+
|
| 193 |
+
typedef enum NvtxCallbackIdCore2
|
| 194 |
+
{
|
| 195 |
+
NVTX_CBID_CORE2_INVALID = 0,
|
| 196 |
+
NVTX_CBID_CORE2_DomainMarkEx = 1,
|
| 197 |
+
NVTX_CBID_CORE2_DomainRangeStartEx = 2,
|
| 198 |
+
NVTX_CBID_CORE2_DomainRangeEnd = 3,
|
| 199 |
+
NVTX_CBID_CORE2_DomainRangePushEx = 4,
|
| 200 |
+
NVTX_CBID_CORE2_DomainRangePop = 5,
|
| 201 |
+
NVTX_CBID_CORE2_DomainResourceCreate = 6,
|
| 202 |
+
NVTX_CBID_CORE2_DomainResourceDestroy = 7,
|
| 203 |
+
NVTX_CBID_CORE2_DomainNameCategoryA = 8,
|
| 204 |
+
NVTX_CBID_CORE2_DomainNameCategoryW = 9,
|
| 205 |
+
NVTX_CBID_CORE2_DomainRegisterStringA = 10,
|
| 206 |
+
NVTX_CBID_CORE2_DomainRegisterStringW = 11,
|
| 207 |
+
NVTX_CBID_CORE2_DomainCreateA = 12,
|
| 208 |
+
NVTX_CBID_CORE2_DomainCreateW = 13,
|
| 209 |
+
NVTX_CBID_CORE2_DomainDestroy = 14,
|
| 210 |
+
NVTX_CBID_CORE2_Initialize = 15,
|
| 211 |
+
/* --- New constants must only be added directly above this line --- */
|
| 212 |
+
NVTX_CBID_CORE2_SIZE,
|
| 213 |
+
NVTX_CBID_CORE2_FORCE_INT = 0x7fffffff
|
| 214 |
+
} NvtxCallbackIdCore2;
|
| 215 |
+
|
| 216 |
+
typedef enum NvtxCallbackIdCuda
|
| 217 |
+
{
|
| 218 |
+
NVTX_CBID_CUDA_INVALID = 0,
|
| 219 |
+
NVTX_CBID_CUDA_NameCuDeviceA = 1,
|
| 220 |
+
NVTX_CBID_CUDA_NameCuDeviceW = 2,
|
| 221 |
+
NVTX_CBID_CUDA_NameCuContextA = 3,
|
| 222 |
+
NVTX_CBID_CUDA_NameCuContextW = 4,
|
| 223 |
+
NVTX_CBID_CUDA_NameCuStreamA = 5,
|
| 224 |
+
NVTX_CBID_CUDA_NameCuStreamW = 6,
|
| 225 |
+
NVTX_CBID_CUDA_NameCuEventA = 7,
|
| 226 |
+
NVTX_CBID_CUDA_NameCuEventW = 8,
|
| 227 |
+
/* --- New constants must only be added directly above this line --- */
|
| 228 |
+
NVTX_CBID_CUDA_SIZE,
|
| 229 |
+
NVTX_CBID_CUDA_FORCE_INT = 0x7fffffff
|
| 230 |
+
} NvtxCallbackIdCuda;
|
| 231 |
+
|
| 232 |
+
typedef enum NvtxCallbackIdCudaRt
|
| 233 |
+
{
|
| 234 |
+
NVTX_CBID_CUDART_INVALID = 0,
|
| 235 |
+
NVTX_CBID_CUDART_NameCudaDeviceA = 1,
|
| 236 |
+
NVTX_CBID_CUDART_NameCudaDeviceW = 2,
|
| 237 |
+
NVTX_CBID_CUDART_NameCudaStreamA = 3,
|
| 238 |
+
NVTX_CBID_CUDART_NameCudaStreamW = 4,
|
| 239 |
+
NVTX_CBID_CUDART_NameCudaEventA = 5,
|
| 240 |
+
NVTX_CBID_CUDART_NameCudaEventW = 6,
|
| 241 |
+
/* --- New constants must only be added directly above this line --- */
|
| 242 |
+
NVTX_CBID_CUDART_SIZE,
|
| 243 |
+
NVTX_CBID_CUDART_FORCE_INT = 0x7fffffff
|
| 244 |
+
} NvtxCallbackIdCudaRt;
|
| 245 |
+
|
| 246 |
+
typedef enum NvtxCallbackIdOpenCL
|
| 247 |
+
{
|
| 248 |
+
NVTX_CBID_OPENCL_INVALID = 0,
|
| 249 |
+
NVTX_CBID_OPENCL_NameClDeviceA = 1,
|
| 250 |
+
NVTX_CBID_OPENCL_NameClDeviceW = 2,
|
| 251 |
+
NVTX_CBID_OPENCL_NameClContextA = 3,
|
| 252 |
+
NVTX_CBID_OPENCL_NameClContextW = 4,
|
| 253 |
+
NVTX_CBID_OPENCL_NameClCommandQueueA = 5,
|
| 254 |
+
NVTX_CBID_OPENCL_NameClCommandQueueW = 6,
|
| 255 |
+
NVTX_CBID_OPENCL_NameClMemObjectA = 7,
|
| 256 |
+
NVTX_CBID_OPENCL_NameClMemObjectW = 8,
|
| 257 |
+
NVTX_CBID_OPENCL_NameClSamplerA = 9,
|
| 258 |
+
NVTX_CBID_OPENCL_NameClSamplerW = 10,
|
| 259 |
+
NVTX_CBID_OPENCL_NameClProgramA = 11,
|
| 260 |
+
NVTX_CBID_OPENCL_NameClProgramW = 12,
|
| 261 |
+
NVTX_CBID_OPENCL_NameClEventA = 13,
|
| 262 |
+
NVTX_CBID_OPENCL_NameClEventW = 14,
|
| 263 |
+
/* --- New constants must only be added directly above this line --- */
|
| 264 |
+
NVTX_CBID_OPENCL_SIZE,
|
| 265 |
+
NVTX_CBID_OPENCL_FORCE_INT = 0x7fffffff
|
| 266 |
+
} NvtxCallbackIdOpenCL;
|
| 267 |
+
|
| 268 |
+
typedef enum NvtxCallbackIdSync
|
| 269 |
+
{
|
| 270 |
+
NVTX_CBID_SYNC_INVALID = 0,
|
| 271 |
+
NVTX_CBID_SYNC_DomainSyncUserCreate = 1,
|
| 272 |
+
NVTX_CBID_SYNC_DomainSyncUserDestroy = 2,
|
| 273 |
+
NVTX_CBID_SYNC_DomainSyncUserAcquireStart = 3,
|
| 274 |
+
NVTX_CBID_SYNC_DomainSyncUserAcquireFailed = 4,
|
| 275 |
+
NVTX_CBID_SYNC_DomainSyncUserAcquireSuccess = 5,
|
| 276 |
+
NVTX_CBID_SYNC_DomainSyncUserReleasing = 6,
|
| 277 |
+
/* --- New constants must only be added directly above this line --- */
|
| 278 |
+
NVTX_CBID_SYNC_SIZE,
|
| 279 |
+
NVTX_CBID_SYNC_FORCE_INT = 0x7fffffff
|
| 280 |
+
} NvtxCallbackIdSync;
|
| 281 |
+
|
| 282 |
+
/* IDs for NVTX Export Tables */
|
| 283 |
+
typedef enum NvtxExportTableID
|
| 284 |
+
{
|
| 285 |
+
NVTX_ETID_INVALID = 0,
|
| 286 |
+
NVTX_ETID_CALLBACKS = 1,
|
| 287 |
+
NVTX_ETID_RESERVED0 = 2,
|
| 288 |
+
NVTX_ETID_VERSIONINFO = 3,
|
| 289 |
+
/* --- New constants must only be added directly above this line --- */
|
| 290 |
+
NVTX_ETID_SIZE,
|
| 291 |
+
NVTX_ETID_FORCE_INT = 0x7fffffff
|
| 292 |
+
} NvtxExportTableID;
|
| 293 |
+
|
| 294 |
+
typedef void (* NvtxFunctionPointer)(void); /* generic uncallable function pointer, must be casted to appropriate function type */
|
| 295 |
+
typedef NvtxFunctionPointer** NvtxFunctionTable; /* double pointer because array(1) of pointers(2) to function pointers */
|
| 296 |
+
|
| 297 |
+
typedef struct NvtxExportTableCallbacks
|
| 298 |
+
{
|
| 299 |
+
size_t struct_size;
|
| 300 |
+
|
| 301 |
+
/* returns an array of pointer to function pointers*/
|
| 302 |
+
int (NVTX_API *GetModuleFunctionTable)(
|
| 303 |
+
NvtxCallbackModule module,
|
| 304 |
+
NvtxFunctionTable* out_table,
|
| 305 |
+
unsigned int* out_size);
|
| 306 |
+
} NvtxExportTableCallbacks;
|
| 307 |
+
|
| 308 |
+
typedef struct NvtxExportTableVersionInfo
|
| 309 |
+
{
|
| 310 |
+
/* sizeof(NvtxExportTableVersionInfo) */
|
| 311 |
+
size_t struct_size;
|
| 312 |
+
|
| 313 |
+
/* The API version comes from the NVTX library linked to the app. The
|
| 314 |
+
* injection library is can use this info to make some assumptions */
|
| 315 |
+
uint32_t version;
|
| 316 |
+
|
| 317 |
+
/* Reserved for alignment, do not use */
|
| 318 |
+
uint32_t reserved0;
|
| 319 |
+
|
| 320 |
+
/* This must be set by tools when attaching to provide applications
|
| 321 |
+
* the ability to, in emergency situations, detect problematic tools
|
| 322 |
+
* versions and modify the NVTX source to prevent attaching anything
|
| 323 |
+
* that causes trouble in the app. Currently, this value is ignored. */
|
| 324 |
+
void (NVTX_API *SetInjectionNvtxVersion)(
|
| 325 |
+
uint32_t version);
|
| 326 |
+
} NvtxExportTableVersionInfo;
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/adjacent_difference.h
ADDED
|
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
/*! \file adjacent_difference.h
|
| 19 |
+
* \brief Compute difference between consecutive elements of a range
|
| 20 |
+
*/
|
| 21 |
+
|
| 22 |
+
#pragma once
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/config.h>
|
| 25 |
+
#include <thrust/detail/execution_policy.h>
|
| 26 |
+
|
| 27 |
+
THRUST_NAMESPACE_BEGIN
|
| 28 |
+
|
| 29 |
+
/*! \addtogroup transformations Transformations
|
| 30 |
+
* \{
|
| 31 |
+
*/
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
/*! \p adjacent_difference calculates the differences of adjacent elements in the
|
| 35 |
+
* range <tt>[first, last)</tt>. That is, <tt>\*first</tt> is assigned to
|
| 36 |
+
* <tt>\*result</tt>, and, for each iterator \p i in the range
|
| 37 |
+
* <tt>[first + 1, last)</tt>, the difference of <tt>\*i</tt> and <tt>*(i - 1)</tt>
|
| 38 |
+
* is assigned to <tt>\*(result + (i - first))</tt>.
|
| 39 |
+
*
|
| 40 |
+
* This version of \p adjacent_difference uses <tt>operator-</tt> to calculate
|
| 41 |
+
* differences.
|
| 42 |
+
*
|
| 43 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 44 |
+
*
|
| 45 |
+
* \param exec The execution policy to use for parallelization.
|
| 46 |
+
* \param first The beginning of the input range.
|
| 47 |
+
* \param last The end of the input range.
|
| 48 |
+
* \param result The beginning of the output range.
|
| 49 |
+
* \return The iterator <tt>result + (last - first)</tt>
|
| 50 |
+
*
|
| 51 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 52 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 53 |
+
* and \c x and \c y are objects of \p InputIterator's \c value_type, then \c x - \c is defined,
|
| 54 |
+
* and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types,
|
| 55 |
+
* and the return type of <tt>x - y</tt> is convertible to a type in \p OutputIterator's set of \c value_types.
|
| 56 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 57 |
+
*
|
| 58 |
+
* \remark Note that \p result is permitted to be the same iterator as \p first. This is
|
| 59 |
+
* useful for computing differences "in place".
|
| 60 |
+
*
|
| 61 |
+
* The following code snippet demonstrates how to use \p adjacent_difference to compute
|
| 62 |
+
* the difference between adjacent elements of a range using the \p thrust::device execution policy:
|
| 63 |
+
*
|
| 64 |
+
* \code
|
| 65 |
+
* #include <thrust/adjacent_difference.h>
|
| 66 |
+
* #include <thrust/device_vector.h>
|
| 67 |
+
* #include <thrust/execution_policy.h>
|
| 68 |
+
* ...
|
| 69 |
+
* int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
|
| 70 |
+
* thrust::device_vector<int> d_data(h_data, h_data + 8);
|
| 71 |
+
* thrust::device_vector<int> d_result(8);
|
| 72 |
+
*
|
| 73 |
+
* thrust::adjacent_difference(thrust::device, d_data.begin(), d_data.end(), d_result.begin());
|
| 74 |
+
*
|
| 75 |
+
* // d_result is now [1, 1, -1, 1, -1, 1, -1, 1]
|
| 76 |
+
* \endcode
|
| 77 |
+
*
|
| 78 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference
|
| 79 |
+
* \see inclusive_scan
|
| 80 |
+
*/
|
| 81 |
+
template<typename DerivedPolicy, typename InputIterator, typename OutputIterator>
|
| 82 |
+
__host__ __device__
|
| 83 |
+
OutputIterator adjacent_difference(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 84 |
+
InputIterator first, InputIterator last,
|
| 85 |
+
OutputIterator result);
|
| 86 |
+
|
| 87 |
+
/*! \p adjacent_difference calculates the differences of adjacent elements in the
|
| 88 |
+
* range <tt>[first, last)</tt>. That is, <tt>*first</tt> is assigned to
|
| 89 |
+
* <tt>\*result</tt>, and, for each iterator \p i in the range
|
| 90 |
+
* <tt>[first + 1, last)</tt>, <tt>binary_op(\*i, \*(i - 1))</tt> is assigned to
|
| 91 |
+
* <tt>\*(result + (i - first))</tt>.
|
| 92 |
+
*
|
| 93 |
+
* This version of \p adjacent_difference uses the binary function \p binary_op to
|
| 94 |
+
* calculate differences.
|
| 95 |
+
*
|
| 96 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 97 |
+
*
|
| 98 |
+
* \param exec The execution policy to use for parallelization.
|
| 99 |
+
* \param first The beginning of the input range.
|
| 100 |
+
* \param last The end of the input range.
|
| 101 |
+
* \param result The beginning of the output range.
|
| 102 |
+
* \param binary_op The binary function used to compute differences.
|
| 103 |
+
* \return The iterator <tt>result + (last - first)</tt>
|
| 104 |
+
*
|
| 105 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 106 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 107 |
+
* and \p InputIterator's \c value_type is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type,
|
| 108 |
+
* and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
|
| 109 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 110 |
+
* \tparam BinaryFunction's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types.
|
| 111 |
+
*
|
| 112 |
+
* \remark Note that \p result is permitted to be the same iterator as \p first. This is
|
| 113 |
+
* useful for computing differences "in place".
|
| 114 |
+
*
|
| 115 |
+
* The following code snippet demonstrates how to use \p adjacent_difference to compute
|
| 116 |
+
* the sum between adjacent elements of a range using the \p thrust::device execution policy:
|
| 117 |
+
*
|
| 118 |
+
* \code
|
| 119 |
+
* #include <thrust/adjacent_difference.h>
|
| 120 |
+
* #include <thrust/functional.h>
|
| 121 |
+
* #include <thrust/device_vector.h>
|
| 122 |
+
* #include <thrust/execution_policy.h>
|
| 123 |
+
* ...
|
| 124 |
+
* int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
|
| 125 |
+
* thrust::device_vector<int> d_data(h_data, h_data + 8);
|
| 126 |
+
* thrust::device_vector<int> d_result(8);
|
| 127 |
+
*
|
| 128 |
+
* thrust::adjacent_difference(thrust::device, d_data.begin(), d_data.end(), d_result.begin(), thrust::plus<int>());
|
| 129 |
+
*
|
| 130 |
+
* // d_result is now [1, 3, 3, 3, 3, 3, 3, 3]
|
| 131 |
+
* \endcode
|
| 132 |
+
*
|
| 133 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference
|
| 134 |
+
* \see inclusive_scan
|
| 135 |
+
*/
|
| 136 |
+
template<typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename BinaryFunction>
|
| 137 |
+
__host__ __device__
|
| 138 |
+
OutputIterator adjacent_difference(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 139 |
+
InputIterator first, InputIterator last,
|
| 140 |
+
OutputIterator result,
|
| 141 |
+
BinaryFunction binary_op);
|
| 142 |
+
|
| 143 |
+
/*! \p adjacent_difference calculates the differences of adjacent elements in the
|
| 144 |
+
* range <tt>[first, last)</tt>. That is, <tt>\*first</tt> is assigned to
|
| 145 |
+
* <tt>\*result</tt>, and, for each iterator \p i in the range
|
| 146 |
+
* <tt>[first + 1, last)</tt>, the difference of <tt>\*i</tt> and <tt>*(i - 1)</tt>
|
| 147 |
+
* is assigned to <tt>\*(result + (i - first))</tt>.
|
| 148 |
+
*
|
| 149 |
+
* This version of \p adjacent_difference uses <tt>operator-</tt> to calculate
|
| 150 |
+
* differences.
|
| 151 |
+
*
|
| 152 |
+
* \param first The beginning of the input range.
|
| 153 |
+
* \param last The end of the input range.
|
| 154 |
+
* \param result The beginning of the output range.
|
| 155 |
+
* \return The iterator <tt>result + (last - first)</tt>
|
| 156 |
+
*
|
| 157 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 158 |
+
* and \c x and \c y are objects of \p InputIterator's \c value_type, then \c x - \c is defined,
|
| 159 |
+
* and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types,
|
| 160 |
+
* and the return type of <tt>x - y</tt> is convertible to a type in \p OutputIterator's set of \c value_types.
|
| 161 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 162 |
+
*
|
| 163 |
+
* \remark Note that \p result is permitted to be the same iterator as \p first. This is
|
| 164 |
+
* useful for computing differences "in place".
|
| 165 |
+
*
|
| 166 |
+
* The following code snippet demonstrates how to use \p adjacent_difference to compute
|
| 167 |
+
* the difference between adjacent elements of a range.
|
| 168 |
+
*
|
| 169 |
+
* \code
|
| 170 |
+
* #include <thrust/adjacent_difference.h>
|
| 171 |
+
* #include <thrust/device_vector.h>
|
| 172 |
+
* ...
|
| 173 |
+
* int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
|
| 174 |
+
* thrust::device_vector<int> d_data(h_data, h_data + 8);
|
| 175 |
+
* thrust::device_vector<int> d_result(8);
|
| 176 |
+
*
|
| 177 |
+
* thrust::adjacent_difference(d_data.begin(), d_data.end(), d_result.begin());
|
| 178 |
+
*
|
| 179 |
+
* // d_result is now [1, 1, -1, 1, -1, 1, -1, 1]
|
| 180 |
+
* \endcode
|
| 181 |
+
*
|
| 182 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference
|
| 183 |
+
* \see inclusive_scan
|
| 184 |
+
*/
|
| 185 |
+
template <typename InputIterator, typename OutputIterator>
|
| 186 |
+
OutputIterator adjacent_difference(InputIterator first, InputIterator last,
|
| 187 |
+
OutputIterator result);
|
| 188 |
+
|
| 189 |
+
/*! \p adjacent_difference calculates the differences of adjacent elements in the
|
| 190 |
+
* range <tt>[first, last)</tt>. That is, <tt>*first</tt> is assigned to
|
| 191 |
+
* <tt>\*result</tt>, and, for each iterator \p i in the range
|
| 192 |
+
* <tt>[first + 1, last)</tt>, <tt>binary_op(\*i, \*(i - 1))</tt> is assigned to
|
| 193 |
+
* <tt>\*(result + (i - first))</tt>.
|
| 194 |
+
*
|
| 195 |
+
* This version of \p adjacent_difference uses the binary function \p binary_op to
|
| 196 |
+
* calculate differences.
|
| 197 |
+
*
|
| 198 |
+
* \param first The beginning of the input range.
|
| 199 |
+
* \param last The end of the input range.
|
| 200 |
+
* \param result The beginning of the output range.
|
| 201 |
+
* \param binary_op The binary function used to compute differences.
|
| 202 |
+
* \return The iterator <tt>result + (last - first)</tt>
|
| 203 |
+
*
|
| 204 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 205 |
+
* and \p InputIterator's \c value_type is convertible to \p BinaryFunction's \c first_argument_type and \c second_argument_type,
|
| 206 |
+
* and \p InputIterator's \c value_type is convertible to a type in \p OutputIterator's set of \c value_types.
|
| 207 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 208 |
+
* \tparam BinaryFunction's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types.
|
| 209 |
+
*
|
| 210 |
+
* \remark Note that \p result is permitted to be the same iterator as \p first. This is
|
| 211 |
+
* useful for computing differences "in place".
|
| 212 |
+
*
|
| 213 |
+
* The following code snippet demonstrates how to use \p adjacent_difference to compute
|
| 214 |
+
* the sum between adjacent elements of a range.
|
| 215 |
+
*
|
| 216 |
+
* \code
|
| 217 |
+
* #include <thrust/adjacent_difference.h>
|
| 218 |
+
* #include <thrust/functional.h>
|
| 219 |
+
* #include <thrust/device_vector.h>
|
| 220 |
+
* ...
|
| 221 |
+
* int h_data[8] = {1, 2, 1, 2, 1, 2, 1, 2};
|
| 222 |
+
* thrust::device_vector<int> d_data(h_data, h_data + 8);
|
| 223 |
+
* thrust::device_vector<int> d_result(8);
|
| 224 |
+
*
|
| 225 |
+
* thrust::adjacent_difference(d_data.begin(), d_data.end(), d_result.begin(), thrust::plus<int>());
|
| 226 |
+
*
|
| 227 |
+
* // d_result is now [1, 3, 3, 3, 3, 3, 3, 3]
|
| 228 |
+
* \endcode
|
| 229 |
+
*
|
| 230 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/adjacent_difference
|
| 231 |
+
* \see inclusive_scan
|
| 232 |
+
*/
|
| 233 |
+
template <typename InputIterator, typename OutputIterator, typename BinaryFunction>
|
| 234 |
+
OutputIterator adjacent_difference(InputIterator first, InputIterator last,
|
| 235 |
+
OutputIterator result,
|
| 236 |
+
BinaryFunction binary_op);
|
| 237 |
+
|
| 238 |
+
/*! \}
|
| 239 |
+
*/
|
| 240 |
+
|
| 241 |
+
THRUST_NAMESPACE_END
|
| 242 |
+
|
| 243 |
+
#include <thrust/detail/adjacent_difference.inl>
|
| 244 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/allocate_unique.h
ADDED
|
@@ -0,0 +1,443 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) 2018 NVIDIA Corporation
|
| 2 |
+
// Author: Bryce Adelstein Lelbach <brycelelbach@gmail.com>
|
| 3 |
+
//
|
| 4 |
+
// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <thrust/detail/config.h>
|
| 9 |
+
#include <thrust/detail/cpp11_required.h>
|
| 10 |
+
|
| 11 |
+
#if THRUST_CPP_DIALECT >= 2011
|
| 12 |
+
|
| 13 |
+
#include <thrust/detail/raw_pointer_cast.h>
|
| 14 |
+
#include <thrust/detail/type_deduction.h>
|
| 15 |
+
#include <thrust/detail/memory_algorithms.h>
|
| 16 |
+
#include <thrust/detail/allocator/allocator_traits.h>
|
| 17 |
+
|
| 18 |
+
#include <utility>
|
| 19 |
+
#include <thrust/detail/memory_wrapper.h>
|
| 20 |
+
|
| 21 |
+
THRUST_NAMESPACE_BEGIN
|
| 22 |
+
|
| 23 |
+
// wg21.link/p0316r0
|
| 24 |
+
|
| 25 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 26 |
+
|
| 27 |
+
namespace detail
|
| 28 |
+
{
|
| 29 |
+
|
| 30 |
+
template <typename Allocator, typename Pointer>
|
| 31 |
+
void allocator_delete_impl(
|
| 32 |
+
Allocator const& alloc, Pointer p, std::false_type
|
| 33 |
+
)
|
| 34 |
+
{
|
| 35 |
+
using traits = typename detail::allocator_traits<
|
| 36 |
+
typename std::remove_cv<
|
| 37 |
+
typename std::remove_reference<Allocator>::type
|
| 38 |
+
>::type
|
| 39 |
+
>;
|
| 40 |
+
|
| 41 |
+
typename traits::allocator_type alloc_T(alloc);
|
| 42 |
+
|
| 43 |
+
if (nullptr != pointer_traits<Pointer>::get(p))
|
| 44 |
+
{
|
| 45 |
+
traits::destroy(alloc_T, thrust::raw_pointer_cast(p));
|
| 46 |
+
traits::deallocate(alloc_T, p, 1);
|
| 47 |
+
}
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
template <typename Allocator, typename Pointer>
|
| 51 |
+
void allocator_delete_impl(
|
| 52 |
+
Allocator const& alloc, Pointer p, std::true_type
|
| 53 |
+
)
|
| 54 |
+
{
|
| 55 |
+
using traits = typename detail::allocator_traits<
|
| 56 |
+
typename std::remove_cv<
|
| 57 |
+
typename std::remove_reference<Allocator>::type
|
| 58 |
+
>::type
|
| 59 |
+
>;
|
| 60 |
+
|
| 61 |
+
typename traits::allocator_type alloc_T(alloc);
|
| 62 |
+
|
| 63 |
+
if (nullptr != pointer_traits<Pointer>::get(p))
|
| 64 |
+
{
|
| 65 |
+
traits::deallocate(alloc_T, p, 1);
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
} // namespace detail
|
| 70 |
+
|
| 71 |
+
template <typename T, typename Allocator, bool Uninitialized = false>
|
| 72 |
+
struct allocator_delete final
|
| 73 |
+
{
|
| 74 |
+
using allocator_type
|
| 75 |
+
= typename std::remove_cv<
|
| 76 |
+
typename std::remove_reference<Allocator>::type
|
| 77 |
+
>::type::template rebind<T>::other;
|
| 78 |
+
using pointer = typename detail::allocator_traits<allocator_type>::pointer;
|
| 79 |
+
|
| 80 |
+
template <typename UAllocator>
|
| 81 |
+
allocator_delete(UAllocator&& other) noexcept
|
| 82 |
+
: alloc_(THRUST_FWD(other))
|
| 83 |
+
{}
|
| 84 |
+
|
| 85 |
+
template <typename U, typename UAllocator>
|
| 86 |
+
allocator_delete(
|
| 87 |
+
allocator_delete<U, UAllocator> const& other
|
| 88 |
+
) noexcept
|
| 89 |
+
: alloc_(other.get_allocator())
|
| 90 |
+
{}
|
| 91 |
+
template <typename U, typename UAllocator>
|
| 92 |
+
allocator_delete(
|
| 93 |
+
allocator_delete<U, UAllocator>&& other
|
| 94 |
+
) noexcept
|
| 95 |
+
: alloc_(std::move(other.get_allocator()))
|
| 96 |
+
{}
|
| 97 |
+
|
| 98 |
+
template <typename U, typename UAllocator>
|
| 99 |
+
allocator_delete& operator=(
|
| 100 |
+
allocator_delete<U, UAllocator> const& other
|
| 101 |
+
) noexcept
|
| 102 |
+
{
|
| 103 |
+
alloc_ = other.get_allocator();
|
| 104 |
+
return *this;
|
| 105 |
+
}
|
| 106 |
+
template <typename U, typename UAllocator>
|
| 107 |
+
allocator_delete& operator=(
|
| 108 |
+
allocator_delete<U, UAllocator>&& other
|
| 109 |
+
) noexcept
|
| 110 |
+
{
|
| 111 |
+
alloc_ = std::move(other.get_allocator());
|
| 112 |
+
return *this;
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
void operator()(pointer p)
|
| 116 |
+
{
|
| 117 |
+
std::integral_constant<bool, Uninitialized> ic;
|
| 118 |
+
|
| 119 |
+
detail::allocator_delete_impl(get_allocator(), p, ic);
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
allocator_type& get_allocator() noexcept { return alloc_; }
|
| 123 |
+
allocator_type const& get_allocator() const noexcept { return alloc_; }
|
| 124 |
+
|
| 125 |
+
void swap(allocator_delete& other) noexcept
|
| 126 |
+
{
|
| 127 |
+
using std::swap;
|
| 128 |
+
swap(alloc_, other.alloc_);
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
private:
|
| 132 |
+
allocator_type alloc_;
|
| 133 |
+
};
|
| 134 |
+
|
| 135 |
+
template <typename T, typename Allocator>
|
| 136 |
+
using uninitialized_allocator_delete = allocator_delete<T, Allocator, true>;
|
| 137 |
+
|
| 138 |
+
namespace detail {
|
| 139 |
+
|
| 140 |
+
template <typename Allocator, typename Pointer, typename Size>
|
| 141 |
+
void array_allocator_delete_impl(
|
| 142 |
+
Allocator const& alloc, Pointer p, Size count, std::false_type
|
| 143 |
+
)
|
| 144 |
+
{
|
| 145 |
+
using traits = typename detail::allocator_traits<
|
| 146 |
+
typename std::remove_cv<
|
| 147 |
+
typename std::remove_reference<Allocator>::type
|
| 148 |
+
>::type
|
| 149 |
+
>;
|
| 150 |
+
|
| 151 |
+
typename traits::allocator_type alloc_T(alloc);
|
| 152 |
+
|
| 153 |
+
if (nullptr != pointer_traits<Pointer>::get(p))
|
| 154 |
+
{
|
| 155 |
+
destroy_n(alloc_T, p, count);
|
| 156 |
+
traits::deallocate(alloc_T, p, count);
|
| 157 |
+
}
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
template <typename Allocator, typename Pointer, typename Size>
|
| 161 |
+
void array_allocator_delete_impl(
|
| 162 |
+
Allocator const& alloc, Pointer p, Size count, std::true_type
|
| 163 |
+
)
|
| 164 |
+
{
|
| 165 |
+
using traits = typename detail::allocator_traits<
|
| 166 |
+
typename std::remove_cv<
|
| 167 |
+
typename std::remove_reference<Allocator>::type
|
| 168 |
+
>::type
|
| 169 |
+
>;
|
| 170 |
+
|
| 171 |
+
typename traits::allocator_type alloc_T(alloc);
|
| 172 |
+
|
| 173 |
+
if (nullptr != pointer_traits<Pointer>::get(p))
|
| 174 |
+
{
|
| 175 |
+
traits::deallocate(alloc_T, p, count);
|
| 176 |
+
}
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
} // namespace detail
|
| 180 |
+
|
| 181 |
+
template <typename T, typename Allocator, bool Uninitialized = false>
|
| 182 |
+
struct array_allocator_delete final
|
| 183 |
+
{
|
| 184 |
+
using allocator_type
|
| 185 |
+
= typename std::remove_cv<
|
| 186 |
+
typename std::remove_reference<Allocator>::type
|
| 187 |
+
>::type::template rebind<T>::other;
|
| 188 |
+
using pointer = typename detail::allocator_traits<allocator_type>::pointer;
|
| 189 |
+
|
| 190 |
+
template <typename UAllocator>
|
| 191 |
+
array_allocator_delete(UAllocator&& other, std::size_t n) noexcept
|
| 192 |
+
: alloc_(THRUST_FWD(other)), count_(n)
|
| 193 |
+
{}
|
| 194 |
+
|
| 195 |
+
template <typename U, typename UAllocator>
|
| 196 |
+
array_allocator_delete(
|
| 197 |
+
array_allocator_delete<U, UAllocator> const& other
|
| 198 |
+
) noexcept
|
| 199 |
+
: alloc_(other.get_allocator()), count_(other.count_)
|
| 200 |
+
{}
|
| 201 |
+
template <typename U, typename UAllocator>
|
| 202 |
+
array_allocator_delete(
|
| 203 |
+
array_allocator_delete<U, UAllocator>&& other
|
| 204 |
+
) noexcept
|
| 205 |
+
: alloc_(std::move(other.get_allocator())), count_(other.count_)
|
| 206 |
+
{}
|
| 207 |
+
|
| 208 |
+
template <typename U, typename UAllocator>
|
| 209 |
+
array_allocator_delete& operator=(
|
| 210 |
+
array_allocator_delete<U, UAllocator> const& other
|
| 211 |
+
) noexcept
|
| 212 |
+
{
|
| 213 |
+
alloc_ = other.get_allocator();
|
| 214 |
+
count_ = other.count_;
|
| 215 |
+
return *this;
|
| 216 |
+
}
|
| 217 |
+
template <typename U, typename UAllocator>
|
| 218 |
+
array_allocator_delete& operator=(
|
| 219 |
+
array_allocator_delete<U, UAllocator>&& other
|
| 220 |
+
) noexcept
|
| 221 |
+
{
|
| 222 |
+
alloc_ = std::move(other.get_allocator());
|
| 223 |
+
count_ = other.count_;
|
| 224 |
+
return *this;
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
void operator()(pointer p)
|
| 228 |
+
{
|
| 229 |
+
std::integral_constant<bool, Uninitialized> ic;
|
| 230 |
+
|
| 231 |
+
detail::array_allocator_delete_impl(get_allocator(), p, count_, ic);
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
allocator_type& get_allocator() noexcept { return alloc_; }
|
| 235 |
+
allocator_type const& get_allocator() const noexcept { return alloc_; }
|
| 236 |
+
|
| 237 |
+
void swap(array_allocator_delete& other) noexcept
|
| 238 |
+
{
|
| 239 |
+
using std::swap;
|
| 240 |
+
swap(alloc_, other.alloc_);
|
| 241 |
+
swap(count_, other.count_);
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
private:
|
| 245 |
+
allocator_type alloc_;
|
| 246 |
+
std::size_t count_;
|
| 247 |
+
};
|
| 248 |
+
|
| 249 |
+
template <typename T, typename Allocator>
|
| 250 |
+
using uninitialized_array_allocator_delete
|
| 251 |
+
= array_allocator_delete<T, Allocator, true>;
|
| 252 |
+
|
| 253 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 254 |
+
|
| 255 |
+
template <typename Pointer, typename Lambda>
|
| 256 |
+
struct tagged_deleter : Lambda
|
| 257 |
+
{
|
| 258 |
+
__host__ __device__
|
| 259 |
+
tagged_deleter(Lambda&& l) : Lambda(THRUST_FWD(l)) {}
|
| 260 |
+
|
| 261 |
+
using pointer = Pointer;
|
| 262 |
+
};
|
| 263 |
+
|
| 264 |
+
template <typename Pointer, typename Lambda>
|
| 265 |
+
__host__ __device__
|
| 266 |
+
tagged_deleter<Pointer, Lambda>
|
| 267 |
+
make_tagged_deleter(Lambda&& l)
|
| 268 |
+
{
|
| 269 |
+
return tagged_deleter<Pointer, Lambda>(THRUST_FWD(l));
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 273 |
+
|
| 274 |
+
template <typename T, typename Allocator, typename... Args>
|
| 275 |
+
__host__
|
| 276 |
+
std::unique_ptr<
|
| 277 |
+
T,
|
| 278 |
+
allocator_delete<
|
| 279 |
+
T
|
| 280 |
+
, typename detail::allocator_traits<
|
| 281 |
+
typename std::remove_cv<
|
| 282 |
+
typename std::remove_reference<Allocator>::type
|
| 283 |
+
>::type
|
| 284 |
+
>::template rebind_traits<T>::allocator_type
|
| 285 |
+
>
|
| 286 |
+
>
|
| 287 |
+
allocate_unique(
|
| 288 |
+
Allocator const& alloc, Args&&... args
|
| 289 |
+
)
|
| 290 |
+
{
|
| 291 |
+
using traits = typename detail::allocator_traits<
|
| 292 |
+
typename std::remove_cv<
|
| 293 |
+
typename std::remove_reference<Allocator>::type
|
| 294 |
+
>::type
|
| 295 |
+
>::template rebind_traits<T>;
|
| 296 |
+
|
| 297 |
+
typename traits::allocator_type alloc_T(alloc);
|
| 298 |
+
|
| 299 |
+
auto hold_deleter = make_tagged_deleter<typename traits::pointer>(
|
| 300 |
+
[&alloc_T] (typename traits::pointer p) {
|
| 301 |
+
traits::deallocate(alloc_T, p, 1);
|
| 302 |
+
}
|
| 303 |
+
);
|
| 304 |
+
using hold_t = std::unique_ptr<T, decltype(hold_deleter)>;
|
| 305 |
+
auto hold = hold_t(traits::allocate(alloc_T, 1), hold_deleter);
|
| 306 |
+
|
| 307 |
+
traits::construct(
|
| 308 |
+
alloc_T, thrust::raw_pointer_cast(hold.get()), THRUST_FWD(args)...
|
| 309 |
+
);
|
| 310 |
+
auto deleter = allocator_delete<T, typename traits::allocator_type>(alloc);
|
| 311 |
+
return std::unique_ptr<T, decltype(deleter)>
|
| 312 |
+
(hold.release(), std::move(deleter));
|
| 313 |
+
}
|
| 314 |
+
|
| 315 |
+
template <typename T, typename Allocator>
|
| 316 |
+
__host__
|
| 317 |
+
std::unique_ptr<
|
| 318 |
+
T,
|
| 319 |
+
uninitialized_allocator_delete<
|
| 320 |
+
T
|
| 321 |
+
, typename detail::allocator_traits<
|
| 322 |
+
typename std::remove_cv<
|
| 323 |
+
typename std::remove_reference<Allocator>::type
|
| 324 |
+
>::type
|
| 325 |
+
>::template rebind_traits<T>::allocator_type
|
| 326 |
+
>
|
| 327 |
+
>
|
| 328 |
+
uninitialized_allocate_unique(
|
| 329 |
+
Allocator const& alloc
|
| 330 |
+
)
|
| 331 |
+
{
|
| 332 |
+
using traits = typename detail::allocator_traits<
|
| 333 |
+
typename std::remove_cv<
|
| 334 |
+
typename std::remove_reference<Allocator>::type
|
| 335 |
+
>::type
|
| 336 |
+
>::template rebind_traits<T>;
|
| 337 |
+
|
| 338 |
+
typename traits::allocator_type alloc_T(alloc);
|
| 339 |
+
|
| 340 |
+
auto hold_deleter = make_tagged_deleter<typename traits::pointer>(
|
| 341 |
+
[&alloc_T] (typename traits::pointer p) {
|
| 342 |
+
traits::deallocate(alloc_T, p, 1);
|
| 343 |
+
}
|
| 344 |
+
);
|
| 345 |
+
using hold_t = std::unique_ptr<T, decltype(hold_deleter)>;
|
| 346 |
+
auto hold = hold_t(traits::allocate(alloc_T, 1), hold_deleter);
|
| 347 |
+
|
| 348 |
+
auto deleter = uninitialized_allocator_delete<
|
| 349 |
+
T, typename traits::allocator_type
|
| 350 |
+
>(alloc_T);
|
| 351 |
+
return std::unique_ptr<T, decltype(deleter)>
|
| 352 |
+
(hold.release(), std::move(deleter));
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
template <typename T, typename Allocator, typename Size, typename... Args>
|
| 356 |
+
__host__
|
| 357 |
+
std::unique_ptr<
|
| 358 |
+
T[],
|
| 359 |
+
array_allocator_delete<
|
| 360 |
+
T
|
| 361 |
+
, typename detail::allocator_traits<
|
| 362 |
+
typename std::remove_cv<
|
| 363 |
+
typename std::remove_reference<Allocator>::type
|
| 364 |
+
>::type
|
| 365 |
+
>::template rebind_traits<T>::allocator_type
|
| 366 |
+
>
|
| 367 |
+
>
|
| 368 |
+
allocate_unique_n(
|
| 369 |
+
Allocator const& alloc, Size n, Args&&... args
|
| 370 |
+
)
|
| 371 |
+
{
|
| 372 |
+
using traits = typename detail::allocator_traits<
|
| 373 |
+
typename std::remove_cv<
|
| 374 |
+
typename std::remove_reference<Allocator>::type
|
| 375 |
+
>::type
|
| 376 |
+
>::template rebind_traits<T>;
|
| 377 |
+
|
| 378 |
+
typename traits::allocator_type alloc_T(alloc);
|
| 379 |
+
|
| 380 |
+
auto hold_deleter = make_tagged_deleter<typename traits::pointer>(
|
| 381 |
+
[n, &alloc_T] (typename traits::pointer p) {
|
| 382 |
+
traits::deallocate(alloc_T, p, n);
|
| 383 |
+
}
|
| 384 |
+
);
|
| 385 |
+
using hold_t = std::unique_ptr<T[], decltype(hold_deleter)>;
|
| 386 |
+
auto hold = hold_t(traits::allocate(alloc_T, n), hold_deleter);
|
| 387 |
+
|
| 388 |
+
uninitialized_construct_n_with_allocator(
|
| 389 |
+
alloc_T, hold.get(), n, THRUST_FWD(args)...
|
| 390 |
+
);
|
| 391 |
+
auto deleter = array_allocator_delete<
|
| 392 |
+
T, typename traits::allocator_type
|
| 393 |
+
>(alloc_T, n);
|
| 394 |
+
return std::unique_ptr<T[], decltype(deleter)>
|
| 395 |
+
(hold.release(), std::move(deleter));
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
+
template <typename T, typename Allocator, typename Size>
|
| 399 |
+
__host__
|
| 400 |
+
std::unique_ptr<
|
| 401 |
+
T[],
|
| 402 |
+
uninitialized_array_allocator_delete<
|
| 403 |
+
T
|
| 404 |
+
, typename detail::allocator_traits<
|
| 405 |
+
typename std::remove_cv<
|
| 406 |
+
typename std::remove_reference<Allocator>::type
|
| 407 |
+
>::type
|
| 408 |
+
>::template rebind_traits<T>::allocator_type
|
| 409 |
+
>
|
| 410 |
+
>
|
| 411 |
+
uninitialized_allocate_unique_n(
|
| 412 |
+
Allocator const& alloc, Size n
|
| 413 |
+
)
|
| 414 |
+
{
|
| 415 |
+
using traits = typename detail::allocator_traits<
|
| 416 |
+
typename std::remove_cv<
|
| 417 |
+
typename std::remove_reference<Allocator>::type
|
| 418 |
+
>::type
|
| 419 |
+
>::template rebind_traits<T>;
|
| 420 |
+
|
| 421 |
+
typename traits::allocator_type alloc_T(alloc);
|
| 422 |
+
|
| 423 |
+
auto hold_deleter = make_tagged_deleter<typename traits::pointer>(
|
| 424 |
+
[n, &alloc_T] (typename traits::pointer p) {
|
| 425 |
+
traits::deallocate(alloc_T, p, n);
|
| 426 |
+
}
|
| 427 |
+
);
|
| 428 |
+
using hold_t = std::unique_ptr<T[], decltype(hold_deleter)>;
|
| 429 |
+
auto hold = hold_t(traits::allocate(alloc_T, n), hold_deleter);
|
| 430 |
+
|
| 431 |
+
auto deleter = uninitialized_array_allocator_delete<
|
| 432 |
+
T, typename traits::allocator_type
|
| 433 |
+
>(alloc_T, n);
|
| 434 |
+
return std::unique_ptr<T[], decltype(deleter)>
|
| 435 |
+
(hold.release(), std::move(deleter));
|
| 436 |
+
}
|
| 437 |
+
|
| 438 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 439 |
+
|
| 440 |
+
THRUST_NAMESPACE_END
|
| 441 |
+
|
| 442 |
+
#endif // THRUST_CPP_DIALECT >= 2011
|
| 443 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/binary_search.h
ADDED
|
@@ -0,0 +1,1899 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
/*! \file binary_search.h
|
| 19 |
+
* \brief Search for values in sorted ranges.
|
| 20 |
+
*/
|
| 21 |
+
|
| 22 |
+
#pragma once
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/config.h>
|
| 25 |
+
#include <thrust/detail/execution_policy.h>
|
| 26 |
+
#include <thrust/pair.h>
|
| 27 |
+
|
| 28 |
+
THRUST_NAMESPACE_BEGIN
|
| 29 |
+
|
| 30 |
+
/*! \addtogroup algorithms
|
| 31 |
+
*/
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
/*! \addtogroup searching
|
| 35 |
+
* \ingroup algorithms
|
| 36 |
+
* \{
|
| 37 |
+
*/
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
/*! \addtogroup binary_search Binary Search
|
| 41 |
+
* \ingroup searching
|
| 42 |
+
* \{
|
| 43 |
+
*/
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
//////////////////////
|
| 47 |
+
// Scalar Functions //
|
| 48 |
+
//////////////////////
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
/*! \p lower_bound is a version of binary search: it attempts to find
|
| 52 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 53 |
+
* Specifically, it returns the first position where value could be
|
| 54 |
+
* inserted without violating the ordering. This version of
|
| 55 |
+
* \p lower_bound uses <tt>operator<</tt> for comparison and returns
|
| 56 |
+
* the furthermost iterator \c i in <tt>[first, last)</tt> such that,
|
| 57 |
+
* for every iterator \c j in <tt>[first, i)</tt>, <tt>*j < value</tt>.
|
| 58 |
+
*
|
| 59 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 60 |
+
*
|
| 61 |
+
* \param exec The execution policy to use for parallelization.
|
| 62 |
+
* \param first The beginning of the ordered sequence.
|
| 63 |
+
* \param last The end of the ordered sequence.
|
| 64 |
+
* \param value The value to be searched.
|
| 65 |
+
* \return The furthermost iterator \c i, such that <tt>*i < value</tt>.
|
| 66 |
+
*
|
| 67 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 68 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 69 |
+
* \tparam LessThanComparable is a model of <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 70 |
+
*
|
| 71 |
+
* The following code snippet demonstrates how to use \p lower_bound
|
| 72 |
+
* to search for values in a ordered range using the \p thrust::device execution policy for parallelization:
|
| 73 |
+
*
|
| 74 |
+
* \code
|
| 75 |
+
* #include <thrust/binary_search.h>
|
| 76 |
+
* #include <thrust/device_vector.h>
|
| 77 |
+
* #include <thrust/execution_policy.h>
|
| 78 |
+
* ...
|
| 79 |
+
* thrust::device_vector<int> input(5);
|
| 80 |
+
*
|
| 81 |
+
* input[0] = 0;
|
| 82 |
+
* input[1] = 2;
|
| 83 |
+
* input[2] = 5;
|
| 84 |
+
* input[3] = 7;
|
| 85 |
+
* input[4] = 8;
|
| 86 |
+
*
|
| 87 |
+
* thrust::lower_bound(thrust::device, input.begin(), input.end(), 0); // returns input.begin()
|
| 88 |
+
* thrust::lower_bound(thrust::device, input.begin(), input.end(), 1); // returns input.begin() + 1
|
| 89 |
+
* thrust::lower_bound(thrust::device, input.begin(), input.end(), 2); // returns input.begin() + 1
|
| 90 |
+
* thrust::lower_bound(thrust::device, input.begin(), input.end(), 3); // returns input.begin() + 2
|
| 91 |
+
* thrust::lower_bound(thrust::device, input.begin(), input.end(), 8); // returns input.begin() + 4
|
| 92 |
+
* thrust::lower_bound(thrust::device, input.begin(), input.end(), 9); // returns input.end()
|
| 93 |
+
* \endcode
|
| 94 |
+
*
|
| 95 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
|
| 96 |
+
* \see \p upper_bound
|
| 97 |
+
* \see \p equal_range
|
| 98 |
+
* \see \p binary_search
|
| 99 |
+
*/
|
| 100 |
+
template<typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable>
|
| 101 |
+
__host__ __device__
|
| 102 |
+
ForwardIterator lower_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 103 |
+
ForwardIterator first,
|
| 104 |
+
ForwardIterator last,
|
| 105 |
+
const LessThanComparable &value);
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
/*! \p lower_bound is a version of binary search: it attempts to find
|
| 109 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 110 |
+
* Specifically, it returns the first position where value could be
|
| 111 |
+
* inserted without violating the ordering. This version of
|
| 112 |
+
* \p lower_bound uses <tt>operator<</tt> for comparison and returns
|
| 113 |
+
* the furthermost iterator \c i in <tt>[first, last)</tt> such that,
|
| 114 |
+
* for every iterator \c j in <tt>[first, i)</tt>, <tt>*j < value</tt>.
|
| 115 |
+
*
|
| 116 |
+
* \param first The beginning of the ordered sequence.
|
| 117 |
+
* \param last The end of the ordered sequence.
|
| 118 |
+
* \param value The value to be searched.
|
| 119 |
+
* \return The furthermost iterator \c i, such that <tt>*i < value</tt>.
|
| 120 |
+
*
|
| 121 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 122 |
+
* \tparam LessThanComparable is a model of <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 123 |
+
*
|
| 124 |
+
* The following code snippet demonstrates how to use \p lower_bound
|
| 125 |
+
* to search for values in a ordered range.
|
| 126 |
+
*
|
| 127 |
+
* \code
|
| 128 |
+
* #include <thrust/binary_search.h>
|
| 129 |
+
* #include <thrust/device_vector.h>
|
| 130 |
+
* ...
|
| 131 |
+
* thrust::device_vector<int> input(5);
|
| 132 |
+
*
|
| 133 |
+
* input[0] = 0;
|
| 134 |
+
* input[1] = 2;
|
| 135 |
+
* input[2] = 5;
|
| 136 |
+
* input[3] = 7;
|
| 137 |
+
* input[4] = 8;
|
| 138 |
+
*
|
| 139 |
+
* thrust::lower_bound(input.begin(), input.end(), 0); // returns input.begin()
|
| 140 |
+
* thrust::lower_bound(input.begin(), input.end(), 1); // returns input.begin() + 1
|
| 141 |
+
* thrust::lower_bound(input.begin(), input.end(), 2); // returns input.begin() + 1
|
| 142 |
+
* thrust::lower_bound(input.begin(), input.end(), 3); // returns input.begin() + 2
|
| 143 |
+
* thrust::lower_bound(input.begin(), input.end(), 8); // returns input.begin() + 4
|
| 144 |
+
* thrust::lower_bound(input.begin(), input.end(), 9); // returns input.end()
|
| 145 |
+
* \endcode
|
| 146 |
+
*
|
| 147 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
|
| 148 |
+
* \see \p upper_bound
|
| 149 |
+
* \see \p equal_range
|
| 150 |
+
* \see \p binary_search
|
| 151 |
+
*/
|
| 152 |
+
template <class ForwardIterator, class LessThanComparable>
|
| 153 |
+
ForwardIterator lower_bound(ForwardIterator first,
|
| 154 |
+
ForwardIterator last,
|
| 155 |
+
const LessThanComparable& value);
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
/*! \p lower_bound is a version of binary search: it attempts to find
|
| 159 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 160 |
+
* Specifically, it returns the first position where value could be
|
| 161 |
+
* inserted without violating the ordering. This version of
|
| 162 |
+
* \p lower_bound uses function object \c comp for comparison
|
| 163 |
+
* and returns the furthermost iterator \c i in <tt>[first, last)</tt>
|
| 164 |
+
* such that, for every iterator \c j in <tt>[first, i)</tt>,
|
| 165 |
+
* <tt>comp(*j, value)</tt> is \c true.
|
| 166 |
+
*
|
| 167 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 168 |
+
*
|
| 169 |
+
* \param exec The execution policy to use for parallelization.
|
| 170 |
+
* \param first The beginning of the ordered sequence.
|
| 171 |
+
* \param last The end of the ordered sequence.
|
| 172 |
+
* \param value The value to be searched.
|
| 173 |
+
* \param comp The comparison operator.
|
| 174 |
+
* \return The furthermost iterator \c i, such that <tt>comp(*i, value)</tt> is \c true.
|
| 175 |
+
*
|
| 176 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 177 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 178 |
+
* \tparam T is comparable to \p ForwardIterator's \c value_type.
|
| 179 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 180 |
+
*
|
| 181 |
+
* The following code snippet demonstrates how to use \p lower_bound
|
| 182 |
+
* to search for values in a ordered range using the \p thrust::device execution policy for parallelization:
|
| 183 |
+
*
|
| 184 |
+
* \code
|
| 185 |
+
* #include <thrust/binary_search.h>
|
| 186 |
+
* #include <thrust/device_vector.h>
|
| 187 |
+
* #include <thrust/functional.h>
|
| 188 |
+
* #include <thrust/execution_policy.h>
|
| 189 |
+
* ...
|
| 190 |
+
* thrust::device_vector<int> input(5);
|
| 191 |
+
*
|
| 192 |
+
* input[0] = 0;
|
| 193 |
+
* input[1] = 2;
|
| 194 |
+
* input[2] = 5;
|
| 195 |
+
* input[3] = 7;
|
| 196 |
+
* input[4] = 8;
|
| 197 |
+
*
|
| 198 |
+
* thrust::lower_bound(input.begin(), input.end(), 0, thrust::less<int>()); // returns input.begin()
|
| 199 |
+
* thrust::lower_bound(input.begin(), input.end(), 1, thrust::less<int>()); // returns input.begin() + 1
|
| 200 |
+
* thrust::lower_bound(input.begin(), input.end(), 2, thrust::less<int>()); // returns input.begin() + 1
|
| 201 |
+
* thrust::lower_bound(input.begin(), input.end(), 3, thrust::less<int>()); // returns input.begin() + 2
|
| 202 |
+
* thrust::lower_bound(input.begin(), input.end(), 8, thrust::less<int>()); // returns input.begin() + 4
|
| 203 |
+
* thrust::lower_bound(input.begin(), input.end(), 9, thrust::less<int>()); // returns input.end()
|
| 204 |
+
* \endcode
|
| 205 |
+
*
|
| 206 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
|
| 207 |
+
* \see \p upper_bound
|
| 208 |
+
* \see \p equal_range
|
| 209 |
+
* \see \p binary_search
|
| 210 |
+
*/
|
| 211 |
+
template<typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
|
| 212 |
+
__host__ __device__
|
| 213 |
+
ForwardIterator lower_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 214 |
+
ForwardIterator first,
|
| 215 |
+
ForwardIterator last,
|
| 216 |
+
const T &value,
|
| 217 |
+
StrictWeakOrdering comp);
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
/*! \p lower_bound is a version of binary search: it attempts to find
|
| 221 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 222 |
+
* Specifically, it returns the first position where value could be
|
| 223 |
+
* inserted without violating the ordering. This version of
|
| 224 |
+
* \p lower_bound uses function object \c comp for comparison
|
| 225 |
+
* and returns the furthermost iterator \c i in <tt>[first, last)</tt>
|
| 226 |
+
* such that, for every iterator \c j in <tt>[first, i)</tt>,
|
| 227 |
+
* <tt>comp(*j, value)</tt> is \c true.
|
| 228 |
+
*
|
| 229 |
+
* \param first The beginning of the ordered sequence.
|
| 230 |
+
* \param last The end of the ordered sequence.
|
| 231 |
+
* \param value The value to be searched.
|
| 232 |
+
* \param comp The comparison operator.
|
| 233 |
+
* \return The furthermost iterator \c i, such that <tt>comp(*i, value)</tt> is \c true.
|
| 234 |
+
*
|
| 235 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 236 |
+
* \tparam T is comparable to \p ForwardIterator's \c value_type.
|
| 237 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 238 |
+
*
|
| 239 |
+
* The following code snippet demonstrates how to use \p lower_bound
|
| 240 |
+
* to search for values in a ordered range.
|
| 241 |
+
*
|
| 242 |
+
* \code
|
| 243 |
+
* #include <thrust/binary_search.h>
|
| 244 |
+
* #include <thrust/device_vector.h>
|
| 245 |
+
* #include <thrust/functional.h>
|
| 246 |
+
* ...
|
| 247 |
+
* thrust::device_vector<int> input(5);
|
| 248 |
+
*
|
| 249 |
+
* input[0] = 0;
|
| 250 |
+
* input[1] = 2;
|
| 251 |
+
* input[2] = 5;
|
| 252 |
+
* input[3] = 7;
|
| 253 |
+
* input[4] = 8;
|
| 254 |
+
*
|
| 255 |
+
* thrust::lower_bound(input.begin(), input.end(), 0, thrust::less<int>()); // returns input.begin()
|
| 256 |
+
* thrust::lower_bound(input.begin(), input.end(), 1, thrust::less<int>()); // returns input.begin() + 1
|
| 257 |
+
* thrust::lower_bound(input.begin(), input.end(), 2, thrust::less<int>()); // returns input.begin() + 1
|
| 258 |
+
* thrust::lower_bound(input.begin(), input.end(), 3, thrust::less<int>()); // returns input.begin() + 2
|
| 259 |
+
* thrust::lower_bound(input.begin(), input.end(), 8, thrust::less<int>()); // returns input.begin() + 4
|
| 260 |
+
* thrust::lower_bound(input.begin(), input.end(), 9, thrust::less<int>()); // returns input.end()
|
| 261 |
+
* \endcode
|
| 262 |
+
*
|
| 263 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
|
| 264 |
+
* \see \p upper_bound
|
| 265 |
+
* \see \p equal_range
|
| 266 |
+
* \see \p binary_search
|
| 267 |
+
*/
|
| 268 |
+
template <class ForwardIterator, class T, class StrictWeakOrdering>
|
| 269 |
+
ForwardIterator lower_bound(ForwardIterator first,
|
| 270 |
+
ForwardIterator last,
|
| 271 |
+
const T& value,
|
| 272 |
+
StrictWeakOrdering comp);
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
/*! \p upper_bound is a version of binary search: it attempts to find
|
| 276 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 277 |
+
* Specifically, it returns the last position where value could be
|
| 278 |
+
* inserted without violating the ordering. This version of
|
| 279 |
+
* \p upper_bound uses <tt>operator<</tt> for comparison and returns
|
| 280 |
+
* the furthermost iterator \c i in <tt>[first, last)</tt> such that,
|
| 281 |
+
* for every iterator \c j in <tt>[first, i)</tt>, <tt>value < *j</tt>
|
| 282 |
+
* is \c false.
|
| 283 |
+
*
|
| 284 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 285 |
+
*
|
| 286 |
+
* \param exec The execution policy to use for parallelization.
|
| 287 |
+
* \param first The beginning of the ordered sequence.
|
| 288 |
+
* \param last The end of the ordered sequence.
|
| 289 |
+
* \param value The value to be searched.
|
| 290 |
+
* \return The furthermost iterator \c i, such that <tt>value < *i</tt> is \c false.
|
| 291 |
+
*
|
| 292 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 293 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 294 |
+
* \tparam LessThanComparable is a model of <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 295 |
+
*
|
| 296 |
+
* The following code snippet demonstrates how to use \p upper_bound
|
| 297 |
+
* to search for values in a ordered range using the \p thrust::device execution policy for parallelism:
|
| 298 |
+
*
|
| 299 |
+
* \code
|
| 300 |
+
* #include <thrust/binary_search.h>
|
| 301 |
+
* #include <thrust/device_vector.h>
|
| 302 |
+
* #include <thrust/execution_policy.h>
|
| 303 |
+
* ...
|
| 304 |
+
* thrust::device_vector<int> input(5);
|
| 305 |
+
*
|
| 306 |
+
* input[0] = 0;
|
| 307 |
+
* input[1] = 2;
|
| 308 |
+
* input[2] = 5;
|
| 309 |
+
* input[3] = 7;
|
| 310 |
+
* input[4] = 8;
|
| 311 |
+
*
|
| 312 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 0); // returns input.begin() + 1
|
| 313 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 1); // returns input.begin() + 1
|
| 314 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 2); // returns input.begin() + 2
|
| 315 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 3); // returns input.begin() + 2
|
| 316 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 8); // returns input.end()
|
| 317 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 9); // returns input.end()
|
| 318 |
+
* \endcode
|
| 319 |
+
*
|
| 320 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
|
| 321 |
+
* \see \p lower_bound
|
| 322 |
+
* \see \p equal_range
|
| 323 |
+
* \see \p binary_search
|
| 324 |
+
*/
|
| 325 |
+
template<typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable>
|
| 326 |
+
__host__ __device__
|
| 327 |
+
ForwardIterator upper_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 328 |
+
ForwardIterator first,
|
| 329 |
+
ForwardIterator last,
|
| 330 |
+
const LessThanComparable &value);
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
/*! \p upper_bound is a version of binary search: it attempts to find
|
| 334 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 335 |
+
* Specifically, it returns the last position where value could be
|
| 336 |
+
* inserted without violating the ordering. This version of
|
| 337 |
+
* \p upper_bound uses <tt>operator<</tt> for comparison and returns
|
| 338 |
+
* the furthermost iterator \c i in <tt>[first, last)</tt> such that,
|
| 339 |
+
* for every iterator \c j in <tt>[first, i)</tt>, <tt>value < *j</tt>
|
| 340 |
+
* is \c false.
|
| 341 |
+
*
|
| 342 |
+
* \param first The beginning of the ordered sequence.
|
| 343 |
+
* \param last The end of the ordered sequence.
|
| 344 |
+
* \param value The value to be searched.
|
| 345 |
+
* \return The furthermost iterator \c i, such that <tt>value < *i</tt> is \c false.
|
| 346 |
+
*
|
| 347 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 348 |
+
* \tparam LessThanComparable is a model of <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 349 |
+
*
|
| 350 |
+
* The following code snippet demonstrates how to use \p upper_bound
|
| 351 |
+
 *  to search for values in an ordered range.
|
| 352 |
+
*
|
| 353 |
+
* \code
|
| 354 |
+
* #include <thrust/binary_search.h>
|
| 355 |
+
* #include <thrust/device_vector.h>
|
| 356 |
+
* ...
|
| 357 |
+
* thrust::device_vector<int> input(5);
|
| 358 |
+
*
|
| 359 |
+
* input[0] = 0;
|
| 360 |
+
* input[1] = 2;
|
| 361 |
+
* input[2] = 5;
|
| 362 |
+
* input[3] = 7;
|
| 363 |
+
* input[4] = 8;
|
| 364 |
+
*
|
| 365 |
+
* thrust::upper_bound(input.begin(), input.end(), 0); // returns input.begin() + 1
|
| 366 |
+
* thrust::upper_bound(input.begin(), input.end(), 1); // returns input.begin() + 1
|
| 367 |
+
* thrust::upper_bound(input.begin(), input.end(), 2); // returns input.begin() + 2
|
| 368 |
+
* thrust::upper_bound(input.begin(), input.end(), 3); // returns input.begin() + 2
|
| 369 |
+
* thrust::upper_bound(input.begin(), input.end(), 8); // returns input.end()
|
| 370 |
+
* thrust::upper_bound(input.begin(), input.end(), 9); // returns input.end()
|
| 371 |
+
* \endcode
|
| 372 |
+
*
|
| 373 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
|
| 374 |
+
* \see \p lower_bound
|
| 375 |
+
* \see \p equal_range
|
| 376 |
+
* \see \p binary_search
|
| 377 |
+
*/
|
| 378 |
+
template <class ForwardIterator, class LessThanComparable>
|
| 379 |
+
ForwardIterator upper_bound(ForwardIterator first,
|
| 380 |
+
ForwardIterator last,
|
| 381 |
+
const LessThanComparable& value);
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
/*! \p upper_bound is a version of binary search: it attempts to find
|
| 385 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 386 |
+
* Specifically, it returns the last position where value could be
|
| 387 |
+
* inserted without violating the ordering. This version of
|
| 388 |
+
* \p upper_bound uses function object \c comp for comparison and returns
|
| 389 |
+
* the furthermost iterator \c i in <tt>[first, last)</tt> such that,
|
| 390 |
+
* for every iterator \c j in <tt>[first, i)</tt>, <tt>comp(value, *j)</tt>
|
| 391 |
+
* is \c false.
|
| 392 |
+
*
|
| 393 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 394 |
+
*
|
| 395 |
+
* \param exec The execution policy to use for parallelization.
|
| 396 |
+
* \param first The beginning of the ordered sequence.
|
| 397 |
+
* \param last The end of the ordered sequence.
|
| 398 |
+
* \param value The value to be searched.
|
| 399 |
+
* \param comp The comparison operator.
|
| 400 |
+
* \return The furthermost iterator \c i, such that <tt>comp(value, *i)</tt> is \c false.
|
| 401 |
+
*
|
| 402 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 403 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 404 |
+
* \tparam T is comparable to \p ForwardIterator's \c value_type.
|
| 405 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 406 |
+
*
|
| 407 |
+
* The following code snippet demonstrates how to use \p upper_bound
|
| 408 |
+
 *  to search for values in an ordered range using the \p thrust::device execution policy for parallelization:
|
| 409 |
+
*
|
| 410 |
+
* \code
|
| 411 |
+
* #include <thrust/binary_search.h>
|
| 412 |
+
* #include <thrust/device_vector.h>
|
| 413 |
+
* #include <thrust/functional.h>
|
| 414 |
+
* #include <thrust/execution_policy.h>
|
| 415 |
+
* ...
|
| 416 |
+
* thrust::device_vector<int> input(5);
|
| 417 |
+
*
|
| 418 |
+
* input[0] = 0;
|
| 419 |
+
* input[1] = 2;
|
| 420 |
+
* input[2] = 5;
|
| 421 |
+
* input[3] = 7;
|
| 422 |
+
* input[4] = 8;
|
| 423 |
+
*
|
| 424 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 0, thrust::less<int>()); // returns input.begin() + 1
|
| 425 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 1, thrust::less<int>()); // returns input.begin() + 1
|
| 426 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 2, thrust::less<int>()); // returns input.begin() + 2
|
| 427 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 3, thrust::less<int>()); // returns input.begin() + 2
|
| 428 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 8, thrust::less<int>()); // returns input.end()
|
| 429 |
+
* thrust::upper_bound(thrust::device, input.begin(), input.end(), 9, thrust::less<int>()); // returns input.end()
|
| 430 |
+
* \endcode
|
| 431 |
+
*
|
| 432 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
|
| 433 |
+
* \see \p lower_bound
|
| 434 |
+
* \see \p equal_range
|
| 435 |
+
* \see \p binary_search
|
| 436 |
+
*/
|
| 437 |
+
template<typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
|
| 438 |
+
__host__ __device__
|
| 439 |
+
ForwardIterator upper_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 440 |
+
ForwardIterator first,
|
| 441 |
+
ForwardIterator last,
|
| 442 |
+
const T &value,
|
| 443 |
+
StrictWeakOrdering comp);
|
| 444 |
+
|
| 445 |
+
/*! \p upper_bound is a version of binary search: it attempts to find
|
| 446 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 447 |
+
* Specifically, it returns the last position where value could be
|
| 448 |
+
* inserted without violating the ordering. This version of
|
| 449 |
+
* \p upper_bound uses function object \c comp for comparison and returns
|
| 450 |
+
* the furthermost iterator \c i in <tt>[first, last)</tt> such that,
|
| 451 |
+
* for every iterator \c j in <tt>[first, i)</tt>, <tt>comp(value, *j)</tt>
|
| 452 |
+
* is \c false.
|
| 453 |
+
*
|
| 454 |
+
* \param first The beginning of the ordered sequence.
|
| 455 |
+
* \param last The end of the ordered sequence.
|
| 456 |
+
* \param value The value to be searched.
|
| 457 |
+
* \param comp The comparison operator.
|
| 458 |
+
* \return The furthermost iterator \c i, such that <tt>comp(value, *i)</tt> is \c false.
|
| 459 |
+
*
|
| 460 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 461 |
+
* \tparam T is comparable to \p ForwardIterator's \c value_type.
|
| 462 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 463 |
+
*
|
| 464 |
+
* The following code snippet demonstrates how to use \p upper_bound
|
| 465 |
+
* to search for values in a ordered range.
|
| 466 |
+
*
|
| 467 |
+
* \code
|
| 468 |
+
* #include <thrust/binary_search.h>
|
| 469 |
+
* #include <thrust/device_vector.h>
|
| 470 |
+
* #include <thrust/functional.h>
|
| 471 |
+
* ...
|
| 472 |
+
* thrust::device_vector<int> input(5);
|
| 473 |
+
*
|
| 474 |
+
* input[0] = 0;
|
| 475 |
+
* input[1] = 2;
|
| 476 |
+
* input[2] = 5;
|
| 477 |
+
* input[3] = 7;
|
| 478 |
+
* input[4] = 8;
|
| 479 |
+
*
|
| 480 |
+
* thrust::upper_bound(input.begin(), input.end(), 0, thrust::less<int>()); // returns input.begin() + 1
|
| 481 |
+
* thrust::upper_bound(input.begin(), input.end(), 1, thrust::less<int>()); // returns input.begin() + 1
|
| 482 |
+
* thrust::upper_bound(input.begin(), input.end(), 2, thrust::less<int>()); // returns input.begin() + 2
|
| 483 |
+
* thrust::upper_bound(input.begin(), input.end(), 3, thrust::less<int>()); // returns input.begin() + 2
|
| 484 |
+
* thrust::upper_bound(input.begin(), input.end(), 8, thrust::less<int>()); // returns input.end()
|
| 485 |
+
* thrust::upper_bound(input.begin(), input.end(), 9, thrust::less<int>()); // returns input.end()
|
| 486 |
+
* \endcode
|
| 487 |
+
*
|
| 488 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
|
| 489 |
+
* \see \p lower_bound
|
| 490 |
+
* \see \p equal_range
|
| 491 |
+
* \see \p binary_search
|
| 492 |
+
*/
|
| 493 |
+
template <class ForwardIterator, class T, class StrictWeakOrdering>
|
| 494 |
+
ForwardIterator upper_bound(ForwardIterator first,
|
| 495 |
+
ForwardIterator last,
|
| 496 |
+
const T& value,
|
| 497 |
+
StrictWeakOrdering comp);
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
/*! \p binary_search is a version of binary search: it attempts to find
|
| 501 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 502 |
+
* It returns \c true if an element that is equivalent to \c value
|
| 503 |
+
* is present in <tt>[first, last)</tt> and \c false if no such element
|
| 504 |
+
* exists. Specifically, this version returns \c true if and only if
|
| 505 |
+
* there exists an iterator \c i in <tt>[first, last)</tt> such that
|
| 506 |
+
* <tt>*i < value</tt> and <tt>value < *i</tt> are both \c false.
|
| 507 |
+
*
|
| 508 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 509 |
+
*
|
| 510 |
+
* \param exec The execution policy to use for parallelization.
|
| 511 |
+
* \param first The beginning of the ordered sequence.
|
| 512 |
+
* \param last The end of the ordered sequence.
|
| 513 |
+
* \param value The value to be searched.
|
| 514 |
+
* \return \c true if an equivalent element exists in <tt>[first, last)</tt>, otherwise \c false.
|
| 515 |
+
*
|
| 516 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 517 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 518 |
+
* \tparam LessThanComparable is a model of <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 519 |
+
*
|
| 520 |
+
* The following code snippet demonstrates how to use \p binary_search
|
| 521 |
+
 *  to search for values in an ordered range using the \p thrust::device execution policy for parallelization:
|
| 522 |
+
*
|
| 523 |
+
* \code
|
| 524 |
+
* #include <thrust/binary_search.h>
|
| 525 |
+
* #include <thrust/device_vector.h>
|
| 526 |
+
* #include <thrust/execution_policy.h>
|
| 527 |
+
* ...
|
| 528 |
+
* thrust::device_vector<int> input(5);
|
| 529 |
+
*
|
| 530 |
+
* input[0] = 0;
|
| 531 |
+
* input[1] = 2;
|
| 532 |
+
* input[2] = 5;
|
| 533 |
+
* input[3] = 7;
|
| 534 |
+
* input[4] = 8;
|
| 535 |
+
*
|
| 536 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 0); // returns true
|
| 537 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 1); // returns false
|
| 538 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 2); // returns true
|
| 539 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 3); // returns false
|
| 540 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 8); // returns true
|
| 541 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 9); // returns false
|
| 542 |
+
* \endcode
|
| 543 |
+
*
|
| 544 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/binary_search
|
| 545 |
+
* \see \p lower_bound
|
| 546 |
+
* \see \p upper_bound
|
| 547 |
+
* \see \p equal_range
|
| 548 |
+
*/
|
| 549 |
+
template <typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable>
|
| 550 |
+
__host__ __device__
|
| 551 |
+
bool binary_search(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 552 |
+
ForwardIterator first,
|
| 553 |
+
ForwardIterator last,
|
| 554 |
+
const LessThanComparable& value);
|
| 555 |
+
|
| 556 |
+
|
| 557 |
+
/*! \p binary_search is a version of binary search: it attempts to find
|
| 558 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 559 |
+
* It returns \c true if an element that is equivalent to \c value
|
| 560 |
+
* is present in <tt>[first, last)</tt> and \c false if no such element
|
| 561 |
+
* exists. Specifically, this version returns \c true if and only if
|
| 562 |
+
* there exists an iterator \c i in <tt>[first, last)</tt> such that
|
| 563 |
+
* <tt>*i < value</tt> and <tt>value < *i</tt> are both \c false.
|
| 564 |
+
*
|
| 565 |
+
* \param first The beginning of the ordered sequence.
|
| 566 |
+
* \param last The end of the ordered sequence.
|
| 567 |
+
* \param value The value to be searched.
|
| 568 |
+
* \return \c true if an equivalent element exists in <tt>[first, last)</tt>, otherwise \c false.
|
| 569 |
+
*
|
| 570 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 571 |
+
* \tparam LessThanComparable is a model of <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 572 |
+
*
|
| 573 |
+
* The following code snippet demonstrates how to use \p binary_search
|
| 574 |
+
 *  to search for values in an ordered range.
|
| 575 |
+
*
|
| 576 |
+
* \code
|
| 577 |
+
* #include <thrust/binary_search.h>
|
| 578 |
+
* #include <thrust/device_vector.h>
|
| 579 |
+
* ...
|
| 580 |
+
* thrust::device_vector<int> input(5);
|
| 581 |
+
*
|
| 582 |
+
* input[0] = 0;
|
| 583 |
+
* input[1] = 2;
|
| 584 |
+
* input[2] = 5;
|
| 585 |
+
* input[3] = 7;
|
| 586 |
+
* input[4] = 8;
|
| 587 |
+
*
|
| 588 |
+
* thrust::binary_search(input.begin(), input.end(), 0); // returns true
|
| 589 |
+
* thrust::binary_search(input.begin(), input.end(), 1); // returns false
|
| 590 |
+
* thrust::binary_search(input.begin(), input.end(), 2); // returns true
|
| 591 |
+
* thrust::binary_search(input.begin(), input.end(), 3); // returns false
|
| 592 |
+
* thrust::binary_search(input.begin(), input.end(), 8); // returns true
|
| 593 |
+
* thrust::binary_search(input.begin(), input.end(), 9); // returns false
|
| 594 |
+
* \endcode
|
| 595 |
+
*
|
| 596 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/binary_search
|
| 597 |
+
* \see \p lower_bound
|
| 598 |
+
* \see \p upper_bound
|
| 599 |
+
* \see \p equal_range
|
| 600 |
+
*/
|
| 601 |
+
template <class ForwardIterator, class LessThanComparable>
|
| 602 |
+
bool binary_search(ForwardIterator first,
|
| 603 |
+
ForwardIterator last,
|
| 604 |
+
const LessThanComparable& value);
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
/*! \p binary_search is a version of binary search: it attempts to find
|
| 608 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 609 |
+
* It returns \c true if an element that is equivalent to \c value
|
| 610 |
+
* is present in <tt>[first, last)</tt> and \c false if no such element
|
| 611 |
+
* exists. Specifically, this version returns \c true if and only if
|
| 612 |
+
* there exists an iterator \c i in <tt>[first, last)</tt> such that
|
| 613 |
+
* <tt>comp(*i, value)</tt> and <tt>comp(value, *i)</tt> are both \c false.
|
| 614 |
+
*
|
| 615 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 616 |
+
*
|
| 617 |
+
* \param exec The execution policy to use for parallelization.
|
| 618 |
+
* \param first The beginning of the ordered sequence.
|
| 619 |
+
* \param last The end of the ordered sequence.
|
| 620 |
+
* \param value The value to be searched.
|
| 621 |
+
* \param comp The comparison operator.
|
| 622 |
+
* \return \c true if an equivalent element exists in <tt>[first, last)</tt>, otherwise \c false.
|
| 623 |
+
*
|
| 624 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 625 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 626 |
+
* \tparam T is comparable to \p ForwardIterator's \c value_type.
|
| 627 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 628 |
+
*
|
| 629 |
+
* The following code snippet demonstrates how to use \p binary_search
|
| 630 |
+
 *  to search for values in an ordered range using the \p thrust::device execution policy for parallelization:
|
| 631 |
+
*
|
| 632 |
+
* \code
|
| 633 |
+
* #include <thrust/binary_search.h>
|
| 634 |
+
* #include <thrust/device_vector.h>
|
| 635 |
+
* #include <thrust/functional.h>
|
| 636 |
+
* #include <thrust/execution_policy.h>
|
| 637 |
+
* ...
|
| 638 |
+
* thrust::device_vector<int> input(5);
|
| 639 |
+
*
|
| 640 |
+
* input[0] = 0;
|
| 641 |
+
* input[1] = 2;
|
| 642 |
+
* input[2] = 5;
|
| 643 |
+
* input[3] = 7;
|
| 644 |
+
* input[4] = 8;
|
| 645 |
+
*
|
| 646 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 0, thrust::less<int>()); // returns true
|
| 647 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 1, thrust::less<int>()); // returns false
|
| 648 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 2, thrust::less<int>()); // returns true
|
| 649 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 3, thrust::less<int>()); // returns false
|
| 650 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 8, thrust::less<int>()); // returns true
|
| 651 |
+
* thrust::binary_search(thrust::device, input.begin(), input.end(), 9, thrust::less<int>()); // returns false
|
| 652 |
+
* \endcode
|
| 653 |
+
*
|
| 654 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/binary_search
|
| 655 |
+
* \see \p lower_bound
|
| 656 |
+
* \see \p upper_bound
|
| 657 |
+
* \see \p equal_range
|
| 658 |
+
*/
|
| 659 |
+
template <typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
|
| 660 |
+
__host__ __device__
|
| 661 |
+
bool binary_search(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 662 |
+
ForwardIterator first,
|
| 663 |
+
ForwardIterator last,
|
| 664 |
+
const T& value,
|
| 665 |
+
StrictWeakOrdering comp);
|
| 666 |
+
|
| 667 |
+
|
| 668 |
+
/*! \p binary_search is a version of binary search: it attempts to find
|
| 669 |
+
* the element value in an ordered range <tt>[first, last)</tt>.
|
| 670 |
+
* It returns \c true if an element that is equivalent to \c value
|
| 671 |
+
* is present in <tt>[first, last)</tt> and \c false if no such element
|
| 672 |
+
* exists. Specifically, this version returns \c true if and only if
|
| 673 |
+
* there exists an iterator \c i in <tt>[first, last)</tt> such that
|
| 674 |
+
* <tt>comp(*i, value)</tt> and <tt>comp(value, *i)</tt> are both \c false.
|
| 675 |
+
*
|
| 676 |
+
* \param first The beginning of the ordered sequence.
|
| 677 |
+
* \param last The end of the ordered sequence.
|
| 678 |
+
* \param value The value to be searched.
|
| 679 |
+
* \param comp The comparison operator.
|
| 680 |
+
* \return \c true if an equivalent element exists in <tt>[first, last)</tt>, otherwise \c false.
|
| 681 |
+
*
|
| 682 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 683 |
+
* \tparam T is comparable to \p ForwardIterator's \c value_type.
|
| 684 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 685 |
+
*
|
| 686 |
+
* The following code snippet demonstrates how to use \p binary_search
|
| 687 |
+
 *  to search for values in an ordered range.
|
| 688 |
+
*
|
| 689 |
+
* \code
|
| 690 |
+
* #include <thrust/binary_search.h>
|
| 691 |
+
* #include <thrust/device_vector.h>
|
| 692 |
+
* #include <thrust/functional.h>
|
| 693 |
+
* ...
|
| 694 |
+
* thrust::device_vector<int> input(5);
|
| 695 |
+
*
|
| 696 |
+
* input[0] = 0;
|
| 697 |
+
* input[1] = 2;
|
| 698 |
+
* input[2] = 5;
|
| 699 |
+
* input[3] = 7;
|
| 700 |
+
* input[4] = 8;
|
| 701 |
+
*
|
| 702 |
+
* thrust::binary_search(input.begin(), input.end(), 0, thrust::less<int>()); // returns true
|
| 703 |
+
* thrust::binary_search(input.begin(), input.end(), 1, thrust::less<int>()); // returns false
|
| 704 |
+
* thrust::binary_search(input.begin(), input.end(), 2, thrust::less<int>()); // returns true
|
| 705 |
+
* thrust::binary_search(input.begin(), input.end(), 3, thrust::less<int>()); // returns false
|
| 706 |
+
* thrust::binary_search(input.begin(), input.end(), 8, thrust::less<int>()); // returns true
|
| 707 |
+
* thrust::binary_search(input.begin(), input.end(), 9, thrust::less<int>()); // returns false
|
| 708 |
+
* \endcode
|
| 709 |
+
*
|
| 710 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/binary_search
|
| 711 |
+
* \see \p lower_bound
|
| 712 |
+
* \see \p upper_bound
|
| 713 |
+
* \see \p equal_range
|
| 714 |
+
*/
|
| 715 |
+
template <class ForwardIterator, class T, class StrictWeakOrdering>
|
| 716 |
+
bool binary_search(ForwardIterator first,
|
| 717 |
+
ForwardIterator last,
|
| 718 |
+
const T& value,
|
| 719 |
+
StrictWeakOrdering comp);
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
/*! \p equal_range is a version of binary search: it attempts to find
|
| 723 |
+
* the element value in an ordered range <tt>[first, last)</tt>. The
|
| 724 |
+
* value returned by \p equal_range is essentially a combination of
|
| 725 |
+
* the values returned by \p lower_bound and \p upper_bound: it returns
|
| 726 |
+
* a \p pair of iterators \c i and \c j such that \c i is the first
|
| 727 |
+
* position where value could be inserted without violating the
|
| 728 |
+
* ordering and \c j is the last position where value could be inserted
|
| 729 |
+
* without violating the ordering. It follows that every element in the
|
| 730 |
+
* range <tt>[i, j)</tt> is equivalent to value, and that
|
| 731 |
+
* <tt>[i, j)</tt> is the largest subrange of <tt>[first, last)</tt> that
|
| 732 |
+
* has this property.
|
| 733 |
+
*
|
| 734 |
+
* This version of \p equal_range returns a \p pair of iterators
|
| 735 |
+
* <tt>[i, j)</tt>, where \c i is the furthermost iterator in
|
| 736 |
+
* <tt>[first, last)</tt> such that, for every iterator \c k in
|
| 737 |
+
* <tt>[first, i)</tt>, <tt>*k < value</tt>. \c j is the furthermost
|
| 738 |
+
* iterator in <tt>[first, last)</tt> such that, for every iterator
|
| 739 |
+
* \c k in <tt>[first, j)</tt>, <tt>value < *k</tt> is \c false.
|
| 740 |
+
* For every iterator \c k in <tt>[i, j)</tt>, neither
|
| 741 |
+
* <tt>value < *k</tt> nor <tt>*k < value</tt> is \c true.
|
| 742 |
+
*
|
| 743 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 744 |
+
*
|
| 745 |
+
* \param exec The execution policy to use for parallelization.
|
| 746 |
+
* \param first The beginning of the ordered sequence.
|
| 747 |
+
* \param last The end of the ordered sequence.
|
| 748 |
+
* \param value The value to be searched.
|
| 749 |
+
* \return A \p pair of iterators <tt>[i, j)</tt> that define the range of equivalent elements.
|
| 750 |
+
*
|
| 751 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 752 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 753 |
+
* \tparam LessThanComparable is a model of <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 754 |
+
*
|
| 755 |
+
* The following code snippet demonstrates how to use \p equal_range
|
| 756 |
+
 *  to search for values in an ordered range using the \p thrust::device execution policy for parallelization:
|
| 757 |
+
*
|
| 758 |
+
* \code
|
| 759 |
+
* #include <thrust/binary_search.h>
|
| 760 |
+
* #include <thrust/device_vector.h>
|
| 761 |
+
* #include <thrust/execution_policy.h>
|
| 762 |
+
* ...
|
| 763 |
+
* thrust::device_vector<int> input(5);
|
| 764 |
+
*
|
| 765 |
+
* input[0] = 0;
|
| 766 |
+
* input[1] = 2;
|
| 767 |
+
* input[2] = 5;
|
| 768 |
+
* input[3] = 7;
|
| 769 |
+
* input[4] = 8;
|
| 770 |
+
*
|
| 771 |
+
* thrust::equal_range(thrust::device, input.begin(), input.end(), 0); // returns [input.begin(), input.begin() + 1)
|
| 772 |
+
* thrust::equal_range(thrust::device, input.begin(), input.end(), 1); // returns [input.begin() + 1, input.begin() + 1)
|
| 773 |
+
* thrust::equal_range(thrust::device, input.begin(), input.end(), 2); // returns [input.begin() + 1, input.begin() + 2)
|
| 774 |
+
* thrust::equal_range(thrust::device, input.begin(), input.end(), 3); // returns [input.begin() + 2, input.begin() + 2)
|
| 775 |
+
 *  thrust::equal_range(thrust::device, input.begin(), input.end(), 8); // returns [input.begin() + 4, input.end())
|
| 776 |
+
 *  thrust::equal_range(thrust::device, input.begin(), input.end(), 9); // returns [input.end(), input.end())
|
| 777 |
+
* \endcode
|
| 778 |
+
*
|
| 779 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/equal_range
|
| 780 |
+
* \see \p lower_bound
|
| 781 |
+
* \see \p upper_bound
|
| 782 |
+
* \see \p binary_search
|
| 783 |
+
*/
|
| 784 |
+
template <typename DerivedPolicy, typename ForwardIterator, typename LessThanComparable>
|
| 785 |
+
__host__ __device__
|
| 786 |
+
thrust::pair<ForwardIterator, ForwardIterator>
|
| 787 |
+
equal_range(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 788 |
+
ForwardIterator first,
|
| 789 |
+
ForwardIterator last,
|
| 790 |
+
const LessThanComparable& value);
|
| 791 |
+
|
| 792 |
+
|
| 793 |
+
/*! \p equal_range is a version of binary search: it attempts to find
|
| 794 |
+
* the element value in an ordered range <tt>[first, last)</tt>. The
|
| 795 |
+
* value returned by \p equal_range is essentially a combination of
|
| 796 |
+
* the values returned by \p lower_bound and \p upper_bound: it returns
|
| 797 |
+
* a \p pair of iterators \c i and \c j such that \c i is the first
|
| 798 |
+
* position where value could be inserted without violating the
|
| 799 |
+
* ordering and \c j is the last position where value could be inserted
|
| 800 |
+
* without violating the ordering. It follows that every element in the
|
| 801 |
+
* range <tt>[i, j)</tt> is equivalent to value, and that
|
| 802 |
+
* <tt>[i, j)</tt> is the largest subrange of <tt>[first, last)</tt> that
|
| 803 |
+
* has this property.
|
| 804 |
+
*
|
| 805 |
+
* This version of \p equal_range returns a \p pair of iterators
|
| 806 |
+
* <tt>[i, j)</tt>, where \c i is the furthermost iterator in
|
| 807 |
+
* <tt>[first, last)</tt> such that, for every iterator \c k in
|
| 808 |
+
* <tt>[first, i)</tt>, <tt>*k < value</tt>. \c j is the furthermost
|
| 809 |
+
* iterator in <tt>[first, last)</tt> such that, for every iterator
|
| 810 |
+
* \c k in <tt>[first, j)</tt>, <tt>value < *k</tt> is \c false.
|
| 811 |
+
* For every iterator \c k in <tt>[i, j)</tt>, neither
|
| 812 |
+
* <tt>value < *k</tt> nor <tt>*k < value</tt> is \c true.
|
| 813 |
+
*
|
| 814 |
+
* \param first The beginning of the ordered sequence.
|
| 815 |
+
* \param last The end of the ordered sequence.
|
| 816 |
+
* \param value The value to be searched.
|
| 817 |
+
* \return A \p pair of iterators <tt>[i, j)</tt> that define the range of equivalent elements.
|
| 818 |
+
*
|
| 819 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 820 |
+
* \tparam LessThanComparable is a model of <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 821 |
+
*
|
| 822 |
+
* The following code snippet demonstrates how to use \p equal_range
|
| 823 |
+
 *  to search for values in an ordered range.
|
| 824 |
+
*
|
| 825 |
+
* \code
|
| 826 |
+
* #include <thrust/binary_search.h>
|
| 827 |
+
* #include <thrust/device_vector.h>
|
| 828 |
+
* ...
|
| 829 |
+
* thrust::device_vector<int> input(5);
|
| 830 |
+
*
|
| 831 |
+
* input[0] = 0;
|
| 832 |
+
* input[1] = 2;
|
| 833 |
+
* input[2] = 5;
|
| 834 |
+
* input[3] = 7;
|
| 835 |
+
* input[4] = 8;
|
| 836 |
+
*
|
| 837 |
+
* thrust::equal_range(input.begin(), input.end(), 0); // returns [input.begin(), input.begin() + 1)
|
| 838 |
+
* thrust::equal_range(input.begin(), input.end(), 1); // returns [input.begin() + 1, input.begin() + 1)
|
| 839 |
+
* thrust::equal_range(input.begin(), input.end(), 2); // returns [input.begin() + 1, input.begin() + 2)
|
| 840 |
+
* thrust::equal_range(input.begin(), input.end(), 3); // returns [input.begin() + 2, input.begin() + 2)
|
| 841 |
+
 *  thrust::equal_range(input.begin(), input.end(), 8); // returns [input.begin() + 4, input.end())
|
| 842 |
+
 *  thrust::equal_range(input.begin(), input.end(), 9); // returns [input.end(), input.end())
|
| 843 |
+
* \endcode
|
| 844 |
+
*
|
| 845 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/equal_range
|
| 846 |
+
* \see \p lower_bound
|
| 847 |
+
* \see \p upper_bound
|
| 848 |
+
* \see \p binary_search
|
| 849 |
+
*/
|
| 850 |
+
template <class ForwardIterator, class LessThanComparable>
|
| 851 |
+
thrust::pair<ForwardIterator, ForwardIterator>
|
| 852 |
+
equal_range(ForwardIterator first,
|
| 853 |
+
ForwardIterator last,
|
| 854 |
+
const LessThanComparable& value);
|
| 855 |
+
|
| 856 |
+
|
| 857 |
+
/*! \p equal_range is a version of binary search: it attempts to find
|
| 858 |
+
* the element value in an ordered range <tt>[first, last)</tt>. The
|
| 859 |
+
* value returned by \p equal_range is essentially a combination of
|
| 860 |
+
* the values returned by \p lower_bound and \p upper_bound: it returns
|
| 861 |
+
* a \p pair of iterators \c i and \c j such that \c i is the first
|
| 862 |
+
* position where value could be inserted without violating the
|
| 863 |
+
* ordering and \c j is the last position where value could be inserted
|
| 864 |
+
* without violating the ordering. It follows that every element in the
|
| 865 |
+
* range <tt>[i, j)</tt> is equivalent to value, and that
|
| 866 |
+
* <tt>[i, j)</tt> is the largest subrange of <tt>[first, last)</tt> that
|
| 867 |
+
* has this property.
|
| 868 |
+
*
|
| 869 |
+
* This version of \p equal_range returns a \p pair of iterators
|
| 870 |
+
* <tt>[i, j)</tt>. \c i is the furthermost iterator in
|
| 871 |
+
* <tt>[first, last)</tt> such that, for every iterator \c k in
|
| 872 |
+
* <tt>[first, i)</tt>, <tt>comp(*k, value)</tt> is \c true.
|
| 873 |
+
* \c j is the furthermost iterator in <tt>[first, last)</tt> such
|
| 874 |
+
* that, for every iterator \c k in <tt>[first, last)</tt>,
|
| 875 |
+
* <tt>comp(value, *k)</tt> is \c false. For every iterator \c k
|
| 876 |
+
* in <tt>[i, j)</tt>, neither <tt>comp(value, *k)</tt> nor
|
| 877 |
+
* <tt>comp(*k, value)</tt> is \c true.
|
| 878 |
+
*
|
| 879 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 880 |
+
*
|
| 881 |
+
* \param exec The execution policy to use for parallelization.
|
| 882 |
+
* \param first The beginning of the ordered sequence.
|
| 883 |
+
* \param last The end of the ordered sequence.
|
| 884 |
+
* \param value The value to be searched.
|
| 885 |
+
* \param comp The comparison operator.
|
| 886 |
+
* \return A \p pair of iterators <tt>[i, j)</tt> that define the range of equivalent elements.
|
| 887 |
+
*
|
| 888 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 889 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 890 |
+
* \tparam T is comparable to \p ForwardIterator's \c value_type.
|
| 891 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 892 |
+
*
|
| 893 |
+
* The following code snippet demonstrates how to use \p equal_range
|
| 894 |
+
* to search for values in a ordered range using the \p thrust::device execution policy for parallelization:
|
| 895 |
+
*
|
| 896 |
+
* \code
|
| 897 |
+
* #include <thrust/binary_search.h>
|
| 898 |
+
* #include <thrust/device_vector.h>
|
| 899 |
+
* #include <thrust/functional.h>
|
| 900 |
+
* #include <thrust/execution_policy.h>
|
| 901 |
+
* ...
|
| 902 |
+
* thrust::device_vector<int> input(5);
|
| 903 |
+
*
|
| 904 |
+
* input[0] = 0;
|
| 905 |
+
* input[1] = 2;
|
| 906 |
+
* input[2] = 5;
|
| 907 |
+
* input[3] = 7;
|
| 908 |
+
* input[4] = 8;
|
| 909 |
+
*
|
| 910 |
+
* thrust::equal_range(thrust::device, input.begin(), input.end(), 0, thrust::less<int>()); // returns [input.begin(), input.begin() + 1)
|
| 911 |
+
* thrust::equal_range(thrust::device, input.begin(), input.end(), 1, thrust::less<int>()); // returns [input.begin() + 1, input.begin() + 1)
|
| 912 |
+
* thrust::equal_range(thrust::device, input.begin(), input.end(), 2, thrust::less<int>()); // returns [input.begin() + 1, input.begin() + 2)
|
| 913 |
+
* thrust::equal_range(thrust::device, input.begin(), input.end(), 3, thrust::less<int>()); // returns [input.begin() + 2, input.begin() + 2)
|
| 914 |
+
* thrust::equal_range(thrust::device, input.begin(), input.end(), 8, thrust::less<int>()); // returns [input.begin() + 4, input.end)
|
| 915 |
+
* thrust::equal_range(thrust::device, input.begin(), input.end(), 9, thrust::less<int>()); // returns [input.end(), input.end)
|
| 916 |
+
* \endcode
|
| 917 |
+
*
|
| 918 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/equal_range
|
| 919 |
+
* \see \p lower_bound
|
| 920 |
+
* \see \p upper_bound
|
| 921 |
+
* \see \p binary_search
|
| 922 |
+
*/
|
| 923 |
+
template <typename DerivedPolicy, typename ForwardIterator, typename T, typename StrictWeakOrdering>
|
| 924 |
+
__host__ __device__
|
| 925 |
+
thrust::pair<ForwardIterator, ForwardIterator>
|
| 926 |
+
equal_range(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 927 |
+
ForwardIterator first,
|
| 928 |
+
ForwardIterator last,
|
| 929 |
+
const T& value,
|
| 930 |
+
StrictWeakOrdering comp);
|
| 931 |
+
|
| 932 |
+
|
| 933 |
+
/*! \p equal_range is a version of binary search: it attempts to find
|
| 934 |
+
* the element value in an ordered range <tt>[first, last)</tt>. The
|
| 935 |
+
* value returned by \p equal_range is essentially a combination of
|
| 936 |
+
* the values returned by \p lower_bound and \p upper_bound: it returns
|
| 937 |
+
* a \p pair of iterators \c i and \c j such that \c i is the first
|
| 938 |
+
* position where value could be inserted without violating the
|
| 939 |
+
* ordering and \c j is the last position where value could be inserted
|
| 940 |
+
* without violating the ordering. It follows that every element in the
|
| 941 |
+
* range <tt>[i, j)</tt> is equivalent to value, and that
|
| 942 |
+
* <tt>[i, j)</tt> is the largest subrange of <tt>[first, last)</tt> that
|
| 943 |
+
* has this property.
|
| 944 |
+
*
|
| 945 |
+
* This version of \p equal_range returns a \p pair of iterators
|
| 946 |
+
* <tt>[i, j)</tt>. \c i is the furthermost iterator in
|
| 947 |
+
* <tt>[first, last)</tt> such that, for every iterator \c k in
|
| 948 |
+
* <tt>[first, i)</tt>, <tt>comp(*k, value)</tt> is \c true.
|
| 949 |
+
* \c j is the furthermost iterator in <tt>[first, last)</tt> such
|
| 950 |
+
* that, for every iterator \c k in <tt>[first, last)</tt>,
|
| 951 |
+
* <tt>comp(value, *k)</tt> is \c false. For every iterator \c k
|
| 952 |
+
* in <tt>[i, j)</tt>, neither <tt>comp(value, *k)</tt> nor
|
| 953 |
+
* <tt>comp(*k, value)</tt> is \c true.
|
| 954 |
+
*
|
| 955 |
+
* \param first The beginning of the ordered sequence.
|
| 956 |
+
* \param last The end of the ordered sequence.
|
| 957 |
+
* \param value The value to be searched.
|
| 958 |
+
* \param comp The comparison operator.
|
| 959 |
+
* \return A \p pair of iterators <tt>[i, j)</tt> that define the range of equivalent elements.
|
| 960 |
+
*
|
| 961 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 962 |
+
* \tparam T is comparable to \p ForwardIterator's \c value_type.
|
| 963 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 964 |
+
*
|
| 965 |
+
* The following code snippet demonstrates how to use \p equal_range
|
| 966 |
+
* to search for values in a ordered range.
|
| 967 |
+
*
|
| 968 |
+
* \code
|
| 969 |
+
* #include <thrust/binary_search.h>
|
| 970 |
+
* #include <thrust/device_vector.h>
|
| 971 |
+
* #include <thrust/functional.h>
|
| 972 |
+
* ...
|
| 973 |
+
* thrust::device_vector<int> input(5);
|
| 974 |
+
*
|
| 975 |
+
* input[0] = 0;
|
| 976 |
+
* input[1] = 2;
|
| 977 |
+
* input[2] = 5;
|
| 978 |
+
* input[3] = 7;
|
| 979 |
+
* input[4] = 8;
|
| 980 |
+
*
|
| 981 |
+
* thrust::equal_range(input.begin(), input.end(), 0, thrust::less<int>()); // returns [input.begin(), input.begin() + 1)
|
| 982 |
+
* thrust::equal_range(input.begin(), input.end(), 1, thrust::less<int>()); // returns [input.begin() + 1, input.begin() + 1)
|
| 983 |
+
* thrust::equal_range(input.begin(), input.end(), 2, thrust::less<int>()); // returns [input.begin() + 1, input.begin() + 2)
|
| 984 |
+
* thrust::equal_range(input.begin(), input.end(), 3, thrust::less<int>()); // returns [input.begin() + 2, input.begin() + 2)
|
| 985 |
+
* thrust::equal_range(input.begin(), input.end(), 8, thrust::less<int>()); // returns [input.begin() + 4, input.end)
|
| 986 |
+
* thrust::equal_range(input.begin(), input.end(), 9, thrust::less<int>()); // returns [input.end(), input.end)
|
| 987 |
+
* \endcode
|
| 988 |
+
*
|
| 989 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/equal_range
|
| 990 |
+
* \see \p lower_bound
|
| 991 |
+
* \see \p upper_bound
|
| 992 |
+
* \see \p binary_search
|
| 993 |
+
*/
|
| 994 |
+
template <class ForwardIterator, class T, class StrictWeakOrdering>
|
| 995 |
+
thrust::pair<ForwardIterator, ForwardIterator>
|
| 996 |
+
equal_range(ForwardIterator first,
|
| 997 |
+
ForwardIterator last,
|
| 998 |
+
const T& value,
|
| 999 |
+
StrictWeakOrdering comp);
|
| 1000 |
+
|
| 1001 |
+
|
| 1002 |
+
/*! \addtogroup vectorized_binary_search Vectorized Searches
|
| 1003 |
+
* \ingroup binary_search
|
| 1004 |
+
* \{
|
| 1005 |
+
*/
|
| 1006 |
+
|
| 1007 |
+
|
| 1008 |
+
//////////////////////
|
| 1009 |
+
// Vector Functions //
|
| 1010 |
+
//////////////////////
|
| 1011 |
+
|
| 1012 |
+
|
| 1013 |
+
/*! \p lower_bound is a vectorized version of binary search: for each
|
| 1014 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1015 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1016 |
+
* Specifically, it returns the index of first position where value could
|
| 1017 |
+
* be inserted without violating the ordering.
|
| 1018 |
+
*
|
| 1019 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 1020 |
+
*
|
| 1021 |
+
* \param exec The execution policy to use for parallelization.
|
| 1022 |
+
* \param first The beginning of the ordered sequence.
|
| 1023 |
+
* \param last The end of the ordered sequence.
|
| 1024 |
+
* \param values_first The beginning of the search values sequence.
|
| 1025 |
+
* \param values_last The end of the search values sequence.
|
| 1026 |
+
* \param result The beginning of the output sequence.
|
| 1027 |
+
*
|
| 1028 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 1029 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1030 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1031 |
+
* and \c InputIterator's \c value_type is <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 1032 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1033 |
+
* and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
|
| 1034 |
+
*
|
| 1035 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1036 |
+
*
|
| 1037 |
+
* The following code snippet demonstrates how to use \p lower_bound
|
| 1038 |
+
* to search for multiple values in a ordered range using the \p thrust::device execution policy for
|
| 1039 |
+
* parallelization:
|
| 1040 |
+
*
|
| 1041 |
+
* \code
|
| 1042 |
+
* #include <thrust/binary_search.h>
|
| 1043 |
+
* #include <thrust/device_vector.h>
|
| 1044 |
+
* #include <thrust/execution_policy.h>
|
| 1045 |
+
* ...
|
| 1046 |
+
* thrust::device_vector<int> input(5);
|
| 1047 |
+
*
|
| 1048 |
+
* input[0] = 0;
|
| 1049 |
+
* input[1] = 2;
|
| 1050 |
+
* input[2] = 5;
|
| 1051 |
+
* input[3] = 7;
|
| 1052 |
+
* input[4] = 8;
|
| 1053 |
+
*
|
| 1054 |
+
* thrust::device_vector<int> values(6);
|
| 1055 |
+
* values[0] = 0;
|
| 1056 |
+
* values[1] = 1;
|
| 1057 |
+
* values[2] = 2;
|
| 1058 |
+
* values[3] = 3;
|
| 1059 |
+
* values[4] = 8;
|
| 1060 |
+
* values[5] = 9;
|
| 1061 |
+
*
|
| 1062 |
+
* thrust::device_vector<unsigned int> output(6);
|
| 1063 |
+
*
|
| 1064 |
+
* thrust::lower_bound(thrust::device,
|
| 1065 |
+
* input.begin(), input.end(),
|
| 1066 |
+
* values.begin(), values.end(),
|
| 1067 |
+
* output.begin());
|
| 1068 |
+
*
|
| 1069 |
+
* // output is now [0, 1, 1, 2, 4, 5]
|
| 1070 |
+
* \endcode
|
| 1071 |
+
*
|
| 1072 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
|
| 1073 |
+
* \see \p upper_bound
|
| 1074 |
+
* \see \p equal_range
|
| 1075 |
+
* \see \p binary_search
|
| 1076 |
+
*/
|
| 1077 |
+
template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
|
| 1078 |
+
__host__ __device__
|
| 1079 |
+
OutputIterator lower_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 1080 |
+
ForwardIterator first,
|
| 1081 |
+
ForwardIterator last,
|
| 1082 |
+
InputIterator values_first,
|
| 1083 |
+
InputIterator values_last,
|
| 1084 |
+
OutputIterator result);
|
| 1085 |
+
|
| 1086 |
+
|
| 1087 |
+
/*! \p lower_bound is a vectorized version of binary search: for each
|
| 1088 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1089 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1090 |
+
* Specifically, it returns the index of first position where value could
|
| 1091 |
+
* be inserted without violating the ordering.
|
| 1092 |
+
*
|
| 1093 |
+
* \param first The beginning of the ordered sequence.
|
| 1094 |
+
* \param last The end of the ordered sequence.
|
| 1095 |
+
* \param values_first The beginning of the search values sequence.
|
| 1096 |
+
* \param values_last The end of the search values sequence.
|
| 1097 |
+
* \param result The beginning of the output sequence.
|
| 1098 |
+
*
|
| 1099 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1100 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1101 |
+
* and \c InputIterator's \c value_type is <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 1102 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1103 |
+
* and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
|
| 1104 |
+
*
|
| 1105 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1106 |
+
*
|
| 1107 |
+
* The following code snippet demonstrates how to use \p lower_bound
|
| 1108 |
+
* to search for multiple values in a ordered range.
|
| 1109 |
+
*
|
| 1110 |
+
* \code
|
| 1111 |
+
* #include <thrust/binary_search.h>
|
| 1112 |
+
* #include <thrust/device_vector.h>
|
| 1113 |
+
* ...
|
| 1114 |
+
* thrust::device_vector<int> input(5);
|
| 1115 |
+
*
|
| 1116 |
+
* input[0] = 0;
|
| 1117 |
+
* input[1] = 2;
|
| 1118 |
+
* input[2] = 5;
|
| 1119 |
+
* input[3] = 7;
|
| 1120 |
+
* input[4] = 8;
|
| 1121 |
+
*
|
| 1122 |
+
* thrust::device_vector<int> values(6);
|
| 1123 |
+
* values[0] = 0;
|
| 1124 |
+
* values[1] = 1;
|
| 1125 |
+
* values[2] = 2;
|
| 1126 |
+
* values[3] = 3;
|
| 1127 |
+
* values[4] = 8;
|
| 1128 |
+
* values[5] = 9;
|
| 1129 |
+
*
|
| 1130 |
+
* thrust::device_vector<unsigned int> output(6);
|
| 1131 |
+
*
|
| 1132 |
+
* thrust::lower_bound(input.begin(), input.end(),
|
| 1133 |
+
* values.begin(), values.end(),
|
| 1134 |
+
* output.begin());
|
| 1135 |
+
*
|
| 1136 |
+
* // output is now [0, 1, 1, 2, 4, 5]
|
| 1137 |
+
* \endcode
|
| 1138 |
+
*
|
| 1139 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
|
| 1140 |
+
* \see \p upper_bound
|
| 1141 |
+
* \see \p equal_range
|
| 1142 |
+
* \see \p binary_search
|
| 1143 |
+
*/
|
| 1144 |
+
template <class ForwardIterator, class InputIterator, class OutputIterator>
|
| 1145 |
+
OutputIterator lower_bound(ForwardIterator first,
|
| 1146 |
+
ForwardIterator last,
|
| 1147 |
+
InputIterator values_first,
|
| 1148 |
+
InputIterator values_last,
|
| 1149 |
+
OutputIterator result);
|
| 1150 |
+
|
| 1151 |
+
|
| 1152 |
+
/*! \p lower_bound is a vectorized version of binary search: for each
|
| 1153 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1154 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1155 |
+
* Specifically, it returns the index of first position where value could
|
| 1156 |
+
* be inserted without violating the ordering. This version of
|
| 1157 |
+
* \p lower_bound uses function object \c comp for comparison.
|
| 1158 |
+
*
|
| 1159 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 1160 |
+
*
|
| 1161 |
+
* \param exec The execution policy to use for parallelization.
|
| 1162 |
+
* \param first The beginning of the ordered sequence.
|
| 1163 |
+
* \param last The end of the ordered sequence.
|
| 1164 |
+
* \param values_first The beginning of the search values sequence.
|
| 1165 |
+
* \param values_last The end of the search values sequence.
|
| 1166 |
+
* \param result The beginning of the output sequence.
|
| 1167 |
+
* \param comp The comparison operator.
|
| 1168 |
+
*
|
| 1169 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 1170 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1171 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1172 |
+
* and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type.
|
| 1173 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1174 |
+
* and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
|
| 1175 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 1176 |
+
*
|
| 1177 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1178 |
+
*
|
| 1179 |
+
* The following code snippet demonstrates how to use \p lower_bound
|
| 1180 |
+
* to search for multiple values in a ordered range.
|
| 1181 |
+
*
|
| 1182 |
+
* \code
|
| 1183 |
+
* #include <thrust/binary_search.h>
|
| 1184 |
+
* #include <thrust/device_vector.h>
|
| 1185 |
+
* #include <thrust/functional.h>
|
| 1186 |
+
* #include <thrust/execution_policy.h>
|
| 1187 |
+
* ...
|
| 1188 |
+
* thrust::device_vector<int> input(5);
|
| 1189 |
+
*
|
| 1190 |
+
* input[0] = 0;
|
| 1191 |
+
* input[1] = 2;
|
| 1192 |
+
* input[2] = 5;
|
| 1193 |
+
* input[3] = 7;
|
| 1194 |
+
* input[4] = 8;
|
| 1195 |
+
*
|
| 1196 |
+
* thrust::device_vector<int> values(6);
|
| 1197 |
+
* values[0] = 0;
|
| 1198 |
+
* values[1] = 1;
|
| 1199 |
+
* values[2] = 2;
|
| 1200 |
+
* values[3] = 3;
|
| 1201 |
+
* values[4] = 8;
|
| 1202 |
+
* values[5] = 9;
|
| 1203 |
+
*
|
| 1204 |
+
* thrust::device_vector<unsigned int> output(6);
|
| 1205 |
+
*
|
| 1206 |
+
* thrust::lower_bound(input.begin(), input.end(),
|
| 1207 |
+
* values.begin(), values.end(),
|
| 1208 |
+
* output.begin(),
|
| 1209 |
+
* thrust::less<int>());
|
| 1210 |
+
*
|
| 1211 |
+
* // output is now [0, 1, 1, 2, 4, 5]
|
| 1212 |
+
* \endcode
|
| 1213 |
+
*
|
| 1214 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
|
| 1215 |
+
* \see \p upper_bound
|
| 1216 |
+
* \see \p equal_range
|
| 1217 |
+
* \see \p binary_search
|
| 1218 |
+
*/
|
| 1219 |
+
template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
|
| 1220 |
+
__host__ __device__
|
| 1221 |
+
OutputIterator lower_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 1222 |
+
ForwardIterator first,
|
| 1223 |
+
ForwardIterator last,
|
| 1224 |
+
InputIterator values_first,
|
| 1225 |
+
InputIterator values_last,
|
| 1226 |
+
OutputIterator result,
|
| 1227 |
+
StrictWeakOrdering comp);
|
| 1228 |
+
|
| 1229 |
+
|
| 1230 |
+
/*! \p lower_bound is a vectorized version of binary search: for each
|
| 1231 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1232 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1233 |
+
* Specifically, it returns the index of first position where value could
|
| 1234 |
+
* be inserted without violating the ordering. This version of
|
| 1235 |
+
* \p lower_bound uses function object \c comp for comparison.
|
| 1236 |
+
*
|
| 1237 |
+
* \param first The beginning of the ordered sequence.
|
| 1238 |
+
* \param last The end of the ordered sequence.
|
| 1239 |
+
* \param values_first The beginning of the search values sequence.
|
| 1240 |
+
* \param values_last The end of the search values sequence.
|
| 1241 |
+
* \param result The beginning of the output sequence.
|
| 1242 |
+
* \param comp The comparison operator.
|
| 1243 |
+
*
|
| 1244 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1245 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1246 |
+
* and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type.
|
| 1247 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1248 |
+
* and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
|
| 1249 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 1250 |
+
*
|
| 1251 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1252 |
+
*
|
| 1253 |
+
* The following code snippet demonstrates how to use \p lower_bound
|
| 1254 |
+
* to search for multiple values in a ordered range.
|
| 1255 |
+
*
|
| 1256 |
+
* \code
|
| 1257 |
+
* #include <thrust/binary_search.h>
|
| 1258 |
+
* #include <thrust/device_vector.h>
|
| 1259 |
+
* #include <thrust/functional.h>
|
| 1260 |
+
* ...
|
| 1261 |
+
* thrust::device_vector<int> input(5);
|
| 1262 |
+
*
|
| 1263 |
+
* input[0] = 0;
|
| 1264 |
+
* input[1] = 2;
|
| 1265 |
+
* input[2] = 5;
|
| 1266 |
+
* input[3] = 7;
|
| 1267 |
+
* input[4] = 8;
|
| 1268 |
+
*
|
| 1269 |
+
* thrust::device_vector<int> values(6);
|
| 1270 |
+
* values[0] = 0;
|
| 1271 |
+
* values[1] = 1;
|
| 1272 |
+
* values[2] = 2;
|
| 1273 |
+
* values[3] = 3;
|
| 1274 |
+
* values[4] = 8;
|
| 1275 |
+
* values[5] = 9;
|
| 1276 |
+
*
|
| 1277 |
+
* thrust::device_vector<unsigned int> output(6);
|
| 1278 |
+
*
|
| 1279 |
+
* thrust::lower_bound(input.begin(), input.end(),
|
| 1280 |
+
* values.begin(), values.end(),
|
| 1281 |
+
* output.begin(),
|
| 1282 |
+
* thrust::less<int>());
|
| 1283 |
+
*
|
| 1284 |
+
* // output is now [0, 1, 1, 2, 4, 5]
|
| 1285 |
+
* \endcode
|
| 1286 |
+
*
|
| 1287 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/lower_bound
|
| 1288 |
+
* \see \p upper_bound
|
| 1289 |
+
* \see \p equal_range
|
| 1290 |
+
* \see \p binary_search
|
| 1291 |
+
*/
|
| 1292 |
+
template <class ForwardIterator, class InputIterator, class OutputIterator, class StrictWeakOrdering>
|
| 1293 |
+
OutputIterator lower_bound(ForwardIterator first,
|
| 1294 |
+
ForwardIterator last,
|
| 1295 |
+
InputIterator values_first,
|
| 1296 |
+
InputIterator values_last,
|
| 1297 |
+
OutputIterator result,
|
| 1298 |
+
StrictWeakOrdering comp);
|
| 1299 |
+
|
| 1300 |
+
|
| 1301 |
+
/*! \p upper_bound is a vectorized version of binary search: for each
|
| 1302 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1303 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1304 |
+
* Specifically, it returns the index of last position where value could
|
| 1305 |
+
* be inserted without violating the ordering.
|
| 1306 |
+
*
|
| 1307 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 1308 |
+
*
|
| 1309 |
+
* \param exec The execution policy to use for parallelization.
|
| 1310 |
+
* \param first The beginning of the ordered sequence.
|
| 1311 |
+
* \param last The end of the ordered sequence.
|
| 1312 |
+
* \param values_first The beginning of the search values sequence.
|
| 1313 |
+
* \param values_last The end of the search values sequence.
|
| 1314 |
+
* \param result The beginning of the output sequence.
|
| 1315 |
+
*
|
| 1316 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 1317 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1318 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1319 |
+
* and \c InputIterator's \c value_type is <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 1320 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1321 |
+
* and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
|
| 1322 |
+
*
|
| 1323 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1324 |
+
*
|
| 1325 |
+
* The following code snippet demonstrates how to use \p upper_bound
|
| 1326 |
+
* to search for multiple values in a ordered range using the \p thrust::device execution policy for
|
| 1327 |
+
* parallelization:
|
| 1328 |
+
*
|
| 1329 |
+
* \code
|
| 1330 |
+
* #include <thrust/binary_search.h>
|
| 1331 |
+
* #include <thrust/device_vector.h>
|
| 1332 |
+
* #include <thrust/execution_policy.h>
|
| 1333 |
+
* ...
|
| 1334 |
+
* thrust::device_vector<int> input(5);
|
| 1335 |
+
*
|
| 1336 |
+
* input[0] = 0;
|
| 1337 |
+
* input[1] = 2;
|
| 1338 |
+
* input[2] = 5;
|
| 1339 |
+
* input[3] = 7;
|
| 1340 |
+
* input[4] = 8;
|
| 1341 |
+
*
|
| 1342 |
+
* thrust::device_vector<int> values(6);
|
| 1343 |
+
* values[0] = 0;
|
| 1344 |
+
* values[1] = 1;
|
| 1345 |
+
* values[2] = 2;
|
| 1346 |
+
* values[3] = 3;
|
| 1347 |
+
* values[4] = 8;
|
| 1348 |
+
* values[5] = 9;
|
| 1349 |
+
*
|
| 1350 |
+
* thrust::device_vector<unsigned int> output(6);
|
| 1351 |
+
*
|
| 1352 |
+
* thrust::upper_bound(thrust::device,
|
| 1353 |
+
* input.begin(), input.end(),
|
| 1354 |
+
* values.begin(), values.end(),
|
| 1355 |
+
* output.begin());
|
| 1356 |
+
*
|
| 1357 |
+
* // output is now [1, 1, 2, 2, 5, 5]
|
| 1358 |
+
* \endcode
|
| 1359 |
+
*
|
| 1360 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
|
| 1361 |
+
* \see \p upper_bound
|
| 1362 |
+
* \see \p equal_range
|
| 1363 |
+
* \see \p binary_search
|
| 1364 |
+
*/
|
| 1365 |
+
template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
|
| 1366 |
+
__host__ __device__
|
| 1367 |
+
OutputIterator upper_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 1368 |
+
ForwardIterator first,
|
| 1369 |
+
ForwardIterator last,
|
| 1370 |
+
InputIterator values_first,
|
| 1371 |
+
InputIterator values_last,
|
| 1372 |
+
OutputIterator result);
|
| 1373 |
+
|
| 1374 |
+
|
| 1375 |
+
/*! \p upper_bound is a vectorized version of binary search: for each
|
| 1376 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1377 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1378 |
+
* Specifically, it returns the index of last position where value could
|
| 1379 |
+
* be inserted without violating the ordering.
|
| 1380 |
+
*
|
| 1381 |
+
* \param first The beginning of the ordered sequence.
|
| 1382 |
+
* \param last The end of the ordered sequence.
|
| 1383 |
+
* \param values_first The beginning of the search values sequence.
|
| 1384 |
+
* \param values_last The end of the search values sequence.
|
| 1385 |
+
* \param result The beginning of the output sequence.
|
| 1386 |
+
*
|
| 1387 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1388 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1389 |
+
* and \c InputIterator's \c value_type is <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 1390 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1391 |
+
* and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
|
| 1392 |
+
*
|
| 1393 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1394 |
+
*
|
| 1395 |
+
* The following code snippet demonstrates how to use \p upper_bound
|
| 1396 |
+
* to search for multiple values in a ordered range.
|
| 1397 |
+
*
|
| 1398 |
+
* \code
|
| 1399 |
+
* #include <thrust/binary_search.h>
|
| 1400 |
+
* #include <thrust/device_vector.h>
|
| 1401 |
+
* ...
|
| 1402 |
+
* thrust::device_vector<int> input(5);
|
| 1403 |
+
*
|
| 1404 |
+
* input[0] = 0;
|
| 1405 |
+
* input[1] = 2;
|
| 1406 |
+
* input[2] = 5;
|
| 1407 |
+
* input[3] = 7;
|
| 1408 |
+
* input[4] = 8;
|
| 1409 |
+
*
|
| 1410 |
+
* thrust::device_vector<int> values(6);
|
| 1411 |
+
* values[0] = 0;
|
| 1412 |
+
* values[1] = 1;
|
| 1413 |
+
* values[2] = 2;
|
| 1414 |
+
* values[3] = 3;
|
| 1415 |
+
* values[4] = 8;
|
| 1416 |
+
* values[5] = 9;
|
| 1417 |
+
*
|
| 1418 |
+
* thrust::device_vector<unsigned int> output(6);
|
| 1419 |
+
*
|
| 1420 |
+
* thrust::upper_bound(input.begin(), input.end(),
|
| 1421 |
+
* values.begin(), values.end(),
|
| 1422 |
+
* output.begin());
|
| 1423 |
+
*
|
| 1424 |
+
* // output is now [1, 1, 2, 2, 5, 5]
|
| 1425 |
+
* \endcode
|
| 1426 |
+
*
|
| 1427 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
|
| 1428 |
+
* \see \p upper_bound
|
| 1429 |
+
* \see \p equal_range
|
| 1430 |
+
* \see \p binary_search
|
| 1431 |
+
*/
|
| 1432 |
+
template <class ForwardIterator, class InputIterator, class OutputIterator>
|
| 1433 |
+
OutputIterator upper_bound(ForwardIterator first,
|
| 1434 |
+
ForwardIterator last,
|
| 1435 |
+
InputIterator values_first,
|
| 1436 |
+
InputIterator values_last,
|
| 1437 |
+
OutputIterator result);
|
| 1438 |
+
|
| 1439 |
+
|
| 1440 |
+
/*! \p upper_bound is a vectorized version of binary search: for each
|
| 1441 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1442 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1443 |
+
* Specifically, it returns the index of first position where value could
|
| 1444 |
+
* be inserted without violating the ordering. This version of
|
| 1445 |
+
* \p upper_bound uses function object \c comp for comparison.
|
| 1446 |
+
*
|
| 1447 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 1448 |
+
*
|
| 1449 |
+
* \param exec The execution policy to use for parallelization.
|
| 1450 |
+
* \param first The beginning of the ordered sequence.
|
| 1451 |
+
* \param last The end of the ordered sequence.
|
| 1452 |
+
* \param values_first The beginning of the search values sequence.
|
| 1453 |
+
* \param values_last The end of the search values sequence.
|
| 1454 |
+
* \param result The beginning of the output sequence.
|
| 1455 |
+
* \param comp The comparison operator.
|
| 1456 |
+
*
|
| 1457 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 1458 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1459 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1460 |
+
* and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type.
|
| 1461 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1462 |
+
* and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
|
| 1463 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 1464 |
+
*
|
| 1465 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1466 |
+
*
|
| 1467 |
+
* The following code snippet demonstrates how to use \p upper_bound
|
| 1468 |
+
* to search for multiple values in a ordered range using the \p thrust::device execution policy for
|
| 1469 |
+
* parallelization:
|
| 1470 |
+
*
|
| 1471 |
+
* \code
|
| 1472 |
+
* #include <thrust/binary_search.h>
|
| 1473 |
+
* #include <thrust/device_vector.h>
|
| 1474 |
+
* #include <thrust/functional.h>
|
| 1475 |
+
* #include <thrust/execution_policy.h>
|
| 1476 |
+
* ...
|
| 1477 |
+
* thrust::device_vector<int> input(5);
|
| 1478 |
+
*
|
| 1479 |
+
* input[0] = 0;
|
| 1480 |
+
* input[1] = 2;
|
| 1481 |
+
* input[2] = 5;
|
| 1482 |
+
* input[3] = 7;
|
| 1483 |
+
* input[4] = 8;
|
| 1484 |
+
*
|
| 1485 |
+
* thrust::device_vector<int> values(6);
|
| 1486 |
+
* values[0] = 0;
|
| 1487 |
+
* values[1] = 1;
|
| 1488 |
+
* values[2] = 2;
|
| 1489 |
+
* values[3] = 3;
|
| 1490 |
+
* values[4] = 8;
|
| 1491 |
+
* values[5] = 9;
|
| 1492 |
+
*
|
| 1493 |
+
* thrust::device_vector<unsigned int> output(6);
|
| 1494 |
+
*
|
| 1495 |
+
* thrust::upper_bound(thrust::device,
|
| 1496 |
+
* input.begin(), input.end(),
|
| 1497 |
+
* values.begin(), values.end(),
|
| 1498 |
+
* output.begin(),
|
| 1499 |
+
* thrust::less<int>());
|
| 1500 |
+
*
|
| 1501 |
+
* // output is now [1, 1, 2, 2, 5, 5]
|
| 1502 |
+
* \endcode
|
| 1503 |
+
*
|
| 1504 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
|
| 1505 |
+
* \see \p lower_bound
|
| 1506 |
+
* \see \p equal_range
|
| 1507 |
+
* \see \p binary_search
|
| 1508 |
+
*/
|
| 1509 |
+
template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
|
| 1510 |
+
__host__ __device__
|
| 1511 |
+
OutputIterator upper_bound(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 1512 |
+
ForwardIterator first,
|
| 1513 |
+
ForwardIterator last,
|
| 1514 |
+
InputIterator values_first,
|
| 1515 |
+
InputIterator values_last,
|
| 1516 |
+
OutputIterator result,
|
| 1517 |
+
StrictWeakOrdering comp);
|
| 1518 |
+
|
| 1519 |
+
|
| 1520 |
+
/*! \p upper_bound is a vectorized version of binary search: for each
|
| 1521 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1522 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1523 |
+
* Specifically, it returns the index of first position where value could
|
| 1524 |
+
* be inserted without violating the ordering. This version of
|
| 1525 |
+
* \p upper_bound uses function object \c comp for comparison.
|
| 1526 |
+
*
|
| 1527 |
+
* \param first The beginning of the ordered sequence.
|
| 1528 |
+
* \param last The end of the ordered sequence.
|
| 1529 |
+
* \param values_first The beginning of the search values sequence.
|
| 1530 |
+
* \param values_last The end of the search values sequence.
|
| 1531 |
+
* \param result The beginning of the output sequence.
|
| 1532 |
+
* \param comp The comparison operator.
|
| 1533 |
+
*
|
| 1534 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1535 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1536 |
+
* and \c InputIterator's \c value_type is comparable to \p ForwardIterator's \c value_type.
|
| 1537 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1538 |
+
* and \c ForwardIterator's difference_type is convertible to \c OutputIterator's \c value_type.
|
| 1539 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 1540 |
+
*
|
| 1541 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1542 |
+
*
|
| 1543 |
+
* The following code snippet demonstrates how to use \p upper_bound
|
| 1544 |
+
* to search for multiple values in a ordered range.
|
| 1545 |
+
*
|
| 1546 |
+
* \code
|
| 1547 |
+
* #include <thrust/binary_search.h>
|
| 1548 |
+
* #include <thrust/device_vector.h>
|
| 1549 |
+
* #include <thrust/functional.h>
|
| 1550 |
+
* ...
|
| 1551 |
+
* thrust::device_vector<int> input(5);
|
| 1552 |
+
*
|
| 1553 |
+
* input[0] = 0;
|
| 1554 |
+
* input[1] = 2;
|
| 1555 |
+
* input[2] = 5;
|
| 1556 |
+
* input[3] = 7;
|
| 1557 |
+
* input[4] = 8;
|
| 1558 |
+
*
|
| 1559 |
+
* thrust::device_vector<int> values(6);
|
| 1560 |
+
* values[0] = 0;
|
| 1561 |
+
* values[1] = 1;
|
| 1562 |
+
* values[2] = 2;
|
| 1563 |
+
* values[3] = 3;
|
| 1564 |
+
* values[4] = 8;
|
| 1565 |
+
* values[5] = 9;
|
| 1566 |
+
*
|
| 1567 |
+
* thrust::device_vector<unsigned int> output(6);
|
| 1568 |
+
*
|
| 1569 |
+
* thrust::upper_bound(input.begin(), input.end(),
|
| 1570 |
+
* values.begin(), values.end(),
|
| 1571 |
+
* output.begin(),
|
| 1572 |
+
* thrust::less<int>());
|
| 1573 |
+
*
|
| 1574 |
+
* // output is now [1, 1, 2, 2, 5, 5]
|
| 1575 |
+
* \endcode
|
| 1576 |
+
*
|
| 1577 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/upper_bound
|
| 1578 |
+
* \see \p lower_bound
|
| 1579 |
+
* \see \p equal_range
|
| 1580 |
+
* \see \p binary_search
|
| 1581 |
+
*/
|
| 1582 |
+
template <class ForwardIterator, class InputIterator, class OutputIterator, class StrictWeakOrdering>
|
| 1583 |
+
OutputIterator upper_bound(ForwardIterator first,
|
| 1584 |
+
ForwardIterator last,
|
| 1585 |
+
InputIterator values_first,
|
| 1586 |
+
InputIterator values_last,
|
| 1587 |
+
OutputIterator result,
|
| 1588 |
+
StrictWeakOrdering comp);
|
| 1589 |
+
|
| 1590 |
+
|
| 1591 |
+
/*! \p binary_search is a vectorized version of binary search: for each
|
| 1592 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1593 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1594 |
+
* It returns \c true if an element that is equivalent to \c value
|
| 1595 |
+
* is present in <tt>[first, last)</tt> and \c false if no such element
|
| 1596 |
+
* exists.
|
| 1597 |
+
*
|
| 1598 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 1599 |
+
*
|
| 1600 |
+
* \param exec The execution policy to use for parallelization.
|
| 1601 |
+
* \param first The beginning of the ordered sequence.
|
| 1602 |
+
* \param last The end of the ordered sequence.
|
| 1603 |
+
* \param values_first The beginning of the search values sequence.
|
| 1604 |
+
* \param values_last The end of the search values sequence.
|
| 1605 |
+
* \param result The beginning of the output sequence.
|
| 1606 |
+
*
|
| 1607 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 1608 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1609 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1610 |
+
* and \c InputIterator's \c value_type is <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 1611 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1612 |
+
* and bool is convertible to \c OutputIterator's \c value_type.
|
| 1613 |
+
*
|
| 1614 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1615 |
+
*
|
| 1616 |
+
* The following code snippet demonstrates how to use \p binary_search
|
| 1617 |
+
* to search for multiple values in a ordered range using the \p thrust::device execution policy for
|
| 1618 |
+
* parallelization:
|
| 1619 |
+
*
|
| 1620 |
+
* \code
|
| 1621 |
+
* #include <thrust/binary_search.h>
|
| 1622 |
+
* #include <thrust/device_vector.h>
|
| 1623 |
+
* #include <thrust/execution_policy.h>
|
| 1624 |
+
* ...
|
| 1625 |
+
* thrust::device_vector<int> input(5);
|
| 1626 |
+
*
|
| 1627 |
+
* input[0] = 0;
|
| 1628 |
+
* input[1] = 2;
|
| 1629 |
+
* input[2] = 5;
|
| 1630 |
+
* input[3] = 7;
|
| 1631 |
+
* input[4] = 8;
|
| 1632 |
+
*
|
| 1633 |
+
* thrust::device_vector<int> values(6);
|
| 1634 |
+
* values[0] = 0;
|
| 1635 |
+
* values[1] = 1;
|
| 1636 |
+
* values[2] = 2;
|
| 1637 |
+
* values[3] = 3;
|
| 1638 |
+
* values[4] = 8;
|
| 1639 |
+
* values[5] = 9;
|
| 1640 |
+
*
|
| 1641 |
+
* thrust::device_vector<bool> output(6);
|
| 1642 |
+
*
|
| 1643 |
+
* thrust::binary_search(thrust::device,
|
| 1644 |
+
* input.begin(), input.end(),
|
| 1645 |
+
* values.begin(), values.end(),
|
| 1646 |
+
* output.begin());
|
| 1647 |
+
*
|
| 1648 |
+
* // output is now [true, false, true, false, true, false]
|
| 1649 |
+
* \endcode
|
| 1650 |
+
*
|
| 1651 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/binary_search
|
| 1652 |
+
* \see \p lower_bound
|
| 1653 |
+
* \see \p upper_bound
|
| 1654 |
+
* \see \p equal_range
|
| 1655 |
+
*/
|
| 1656 |
+
template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator>
|
| 1657 |
+
__host__ __device__
|
| 1658 |
+
OutputIterator binary_search(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 1659 |
+
ForwardIterator first,
|
| 1660 |
+
ForwardIterator last,
|
| 1661 |
+
InputIterator values_first,
|
| 1662 |
+
InputIterator values_last,
|
| 1663 |
+
OutputIterator result);
|
| 1664 |
+
|
| 1665 |
+
|
| 1666 |
+
/*! \p binary_search is a vectorized version of binary search: for each
|
| 1667 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1668 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1669 |
+
* It returns \c true if an element that is equivalent to \c value
|
| 1670 |
+
* is present in <tt>[first, last)</tt> and \c false if no such element
|
| 1671 |
+
* exists.
|
| 1672 |
+
*
|
| 1673 |
+
* \param first The beginning of the ordered sequence.
|
| 1674 |
+
* \param last The end of the ordered sequence.
|
| 1675 |
+
* \param values_first The beginning of the search values sequence.
|
| 1676 |
+
* \param values_last The end of the search values sequence.
|
| 1677 |
+
* \param result The beginning of the output sequence.
|
| 1678 |
+
*
|
| 1679 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1680 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1681 |
+
* and \c InputIterator's \c value_type is <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 1682 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1683 |
+
* and bool is convertible to \c OutputIterator's \c value_type.
|
| 1684 |
+
*
|
| 1685 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1686 |
+
*
|
| 1687 |
+
* The following code snippet demonstrates how to use \p binary_search
|
| 1688 |
+
* to search for multiple values in a ordered range.
|
| 1689 |
+
*
|
| 1690 |
+
* \code
|
| 1691 |
+
* #include <thrust/binary_search.h>
|
| 1692 |
+
* #include <thrust/device_vector.h>
|
| 1693 |
+
* ...
|
| 1694 |
+
* thrust::device_vector<int> input(5);
|
| 1695 |
+
*
|
| 1696 |
+
* input[0] = 0;
|
| 1697 |
+
* input[1] = 2;
|
| 1698 |
+
* input[2] = 5;
|
| 1699 |
+
* input[3] = 7;
|
| 1700 |
+
* input[4] = 8;
|
| 1701 |
+
*
|
| 1702 |
+
* thrust::device_vector<int> values(6);
|
| 1703 |
+
* values[0] = 0;
|
| 1704 |
+
* values[1] = 1;
|
| 1705 |
+
* values[2] = 2;
|
| 1706 |
+
* values[3] = 3;
|
| 1707 |
+
* values[4] = 8;
|
| 1708 |
+
* values[5] = 9;
|
| 1709 |
+
*
|
| 1710 |
+
* thrust::device_vector<bool> output(6);
|
| 1711 |
+
*
|
| 1712 |
+
* thrust::binary_search(input.begin(), input.end(),
|
| 1713 |
+
* values.begin(), values.end(),
|
| 1714 |
+
* output.begin());
|
| 1715 |
+
*
|
| 1716 |
+
* // output is now [true, false, true, false, true, false]
|
| 1717 |
+
* \endcode
|
| 1718 |
+
*
|
| 1719 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/binary_search
|
| 1720 |
+
* \see \p lower_bound
|
| 1721 |
+
* \see \p upper_bound
|
| 1722 |
+
* \see \p equal_range
|
| 1723 |
+
*/
|
| 1724 |
+
template <class ForwardIterator, class InputIterator, class OutputIterator>
|
| 1725 |
+
OutputIterator binary_search(ForwardIterator first,
|
| 1726 |
+
ForwardIterator last,
|
| 1727 |
+
InputIterator values_first,
|
| 1728 |
+
InputIterator values_last,
|
| 1729 |
+
OutputIterator result);
|
| 1730 |
+
|
| 1731 |
+
|
| 1732 |
+
/*! \p binary_search is a vectorized version of binary search: for each
|
| 1733 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1734 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1735 |
+
* It returns \c true if an element that is equivalent to \c value
|
| 1736 |
+
* is present in <tt>[first, last)</tt> and \c false if no such element
|
| 1737 |
+
* exists. This version of \p binary_search uses function object
|
| 1738 |
+
* \c comp for comparison.
|
| 1739 |
+
*
|
| 1740 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 1741 |
+
*
|
| 1742 |
+
* \param exec The execution policy to use for parallelization.
|
| 1743 |
+
* \param first The beginning of the ordered sequence.
|
| 1744 |
+
* \param last The end of the ordered sequence.
|
| 1745 |
+
* \param values_first The beginning of the search values sequence.
|
| 1746 |
+
* \param values_last The end of the search values sequence.
|
| 1747 |
+
* \param result The beginning of the output sequence.
|
| 1748 |
+
* \param comp The comparison operator.
|
| 1749 |
+
*
|
| 1750 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 1751 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1752 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1753 |
+
* and \c InputIterator's \c value_type is <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 1754 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1755 |
+
* and bool is convertible to \c OutputIterator's \c value_type.
|
| 1756 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 1757 |
+
*
|
| 1758 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1759 |
+
*
|
| 1760 |
+
* The following code snippet demonstrates how to use \p binary_search
|
| 1761 |
+
* to search for multiple values in a ordered range using the \p thrust::device execution policy for
|
| 1762 |
+
* parallelization:
|
| 1763 |
+
*
|
| 1764 |
+
* \code
|
| 1765 |
+
* #include <thrust/binary_search.h>
|
| 1766 |
+
* #include <thrust/device_vector.h>
|
| 1767 |
+
* #include <thrust/functional.h>
|
| 1768 |
+
* #include <thrust/execution_policy.h>
|
| 1769 |
+
* ...
|
| 1770 |
+
* thrust::device_vector<int> input(5);
|
| 1771 |
+
*
|
| 1772 |
+
* input[0] = 0;
|
| 1773 |
+
* input[1] = 2;
|
| 1774 |
+
* input[2] = 5;
|
| 1775 |
+
* input[3] = 7;
|
| 1776 |
+
* input[4] = 8;
|
| 1777 |
+
*
|
| 1778 |
+
* thrust::device_vector<int> values(6);
|
| 1779 |
+
* values[0] = 0;
|
| 1780 |
+
* values[1] = 1;
|
| 1781 |
+
* values[2] = 2;
|
| 1782 |
+
* values[3] = 3;
|
| 1783 |
+
* values[4] = 8;
|
| 1784 |
+
* values[5] = 9;
|
| 1785 |
+
*
|
| 1786 |
+
* thrust::device_vector<bool> output(6);
|
| 1787 |
+
*
|
| 1788 |
+
* thrust::binary_search(thrust::device,
|
| 1789 |
+
* input.begin(), input.end(),
|
| 1790 |
+
* values.begin(), values.end(),
|
| 1791 |
+
* output.begin(),
|
| 1792 |
+
* thrust::less<T>());
|
| 1793 |
+
*
|
| 1794 |
+
* // output is now [true, false, true, false, true, false]
|
| 1795 |
+
* \endcode
|
| 1796 |
+
*
|
| 1797 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/binary_search
|
| 1798 |
+
* \see \p lower_bound
|
| 1799 |
+
* \see \p upper_bound
|
| 1800 |
+
* \see \p equal_range
|
| 1801 |
+
*/
|
| 1802 |
+
template <typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename OutputIterator, typename StrictWeakOrdering>
|
| 1803 |
+
__host__ __device__
|
| 1804 |
+
OutputIterator binary_search(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 1805 |
+
ForwardIterator first,
|
| 1806 |
+
ForwardIterator last,
|
| 1807 |
+
InputIterator values_first,
|
| 1808 |
+
InputIterator values_last,
|
| 1809 |
+
OutputIterator result,
|
| 1810 |
+
StrictWeakOrdering comp);
|
| 1811 |
+
|
| 1812 |
+
|
| 1813 |
+
/*! \p binary_search is a vectorized version of binary search: for each
|
| 1814 |
+
* iterator \c v in <tt>[values_first, values_last)</tt> it attempts to
|
| 1815 |
+
* find the value <tt>*v</tt> in an ordered range <tt>[first, last)</tt>.
|
| 1816 |
+
* It returns \c true if an element that is equivalent to \c value
|
| 1817 |
+
* is present in <tt>[first, last)</tt> and \c false if no such element
|
| 1818 |
+
* exists. This version of \p binary_search uses function object
|
| 1819 |
+
* \c comp for comparison.
|
| 1820 |
+
*
|
| 1821 |
+
* \param first The beginning of the ordered sequence.
|
| 1822 |
+
* \param last The end of the ordered sequence.
|
| 1823 |
+
* \param values_first The beginning of the search values sequence.
|
| 1824 |
+
* \param values_last The end of the search values sequence.
|
| 1825 |
+
* \param result The beginning of the output sequence.
|
| 1826 |
+
* \param comp The comparison operator.
|
| 1827 |
+
*
|
| 1828 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>.
|
| 1829 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 1830 |
+
* and \c InputIterator's \c value_type is <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThanComparable</a>.
|
| 1831 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 1832 |
+
* and bool is convertible to \c OutputIterator's \c value_type.
|
| 1833 |
+
* \tparam StrictWeakOrdering is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
|
| 1834 |
+
*
|
| 1835 |
+
* \pre The ranges <tt>[first,last)</tt> and <tt>[result, result + (last - first))</tt> shall not overlap.
|
| 1836 |
+
*
|
| 1837 |
+
* The following code snippet demonstrates how to use \p binary_search
|
| 1838 |
+
* to search for multiple values in a ordered range.
|
| 1839 |
+
*
|
| 1840 |
+
* \code
|
| 1841 |
+
* #include <thrust/binary_search.h>
|
| 1842 |
+
* #include <thrust/device_vector.h>
|
| 1843 |
+
* #include <thrust/functional.h>
|
| 1844 |
+
* ...
|
| 1845 |
+
* thrust::device_vector<int> input(5);
|
| 1846 |
+
*
|
| 1847 |
+
* input[0] = 0;
|
| 1848 |
+
* input[1] = 2;
|
| 1849 |
+
* input[2] = 5;
|
| 1850 |
+
* input[3] = 7;
|
| 1851 |
+
* input[4] = 8;
|
| 1852 |
+
*
|
| 1853 |
+
* thrust::device_vector<int> values(6);
|
| 1854 |
+
* values[0] = 0;
|
| 1855 |
+
* values[1] = 1;
|
| 1856 |
+
* values[2] = 2;
|
| 1857 |
+
* values[3] = 3;
|
| 1858 |
+
* values[4] = 8;
|
| 1859 |
+
* values[5] = 9;
|
| 1860 |
+
*
|
| 1861 |
+
* thrust::device_vector<bool> output(6);
|
| 1862 |
+
*
|
| 1863 |
+
* thrust::binary_search(input.begin(), input.end(),
|
| 1864 |
+
* values.begin(), values.end(),
|
| 1865 |
+
* output.begin(),
|
| 1866 |
+
* thrust::less<T>());
|
| 1867 |
+
*
|
| 1868 |
+
* // output is now [true, false, true, false, true, false]
|
| 1869 |
+
* \endcode
|
| 1870 |
+
*
|
| 1871 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/binary_search
|
| 1872 |
+
* \see \p lower_bound
|
| 1873 |
+
* \see \p upper_bound
|
| 1874 |
+
* \see \p equal_range
|
| 1875 |
+
*/
|
| 1876 |
+
template <class ForwardIterator, class InputIterator, class OutputIterator, class StrictWeakOrdering>
|
| 1877 |
+
OutputIterator binary_search(ForwardIterator first,
|
| 1878 |
+
ForwardIterator last,
|
| 1879 |
+
InputIterator values_first,
|
| 1880 |
+
InputIterator values_last,
|
| 1881 |
+
OutputIterator result,
|
| 1882 |
+
StrictWeakOrdering comp);
|
| 1883 |
+
|
| 1884 |
+
|
| 1885 |
+
/*! \} // end vectorized_binary_search
|
| 1886 |
+
*/
|
| 1887 |
+
|
| 1888 |
+
|
| 1889 |
+
/*! \} // end binary_search
|
| 1890 |
+
*/
|
| 1891 |
+
|
| 1892 |
+
|
| 1893 |
+
/*! \} // end searching
|
| 1894 |
+
*/
|
| 1895 |
+
|
| 1896 |
+
THRUST_NAMESPACE_END
|
| 1897 |
+
|
| 1898 |
+
#include <thrust/detail/binary_search.inl>
|
| 1899 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/complex.h
ADDED
|
@@ -0,0 +1,1047 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2019 NVIDIA Corporation
|
| 3 |
+
* Copyright 2013 Filipe RNC Maia
|
| 4 |
+
*
|
| 5 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
* you may not use this file except in compliance with the License.
|
| 7 |
+
* You may obtain a copy of the License at
|
| 8 |
+
*
|
| 9 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
*
|
| 11 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
* See the License for the specific language governing permissions and
|
| 15 |
+
* limitations under the License.
|
| 16 |
+
*/
|
| 17 |
+
|
| 18 |
+
/*! \file complex.h
|
| 19 |
+
* \brief Complex numbers
|
| 20 |
+
*/
|
| 21 |
+
|
| 22 |
+
#pragma once
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/config.h>
|
| 25 |
+
|
| 26 |
+
#include <cmath>
|
| 27 |
+
#include <complex>
|
| 28 |
+
#include <sstream>
|
| 29 |
+
#include <thrust/detail/type_traits.h>
|
| 30 |
+
|
| 31 |
+
#if THRUST_CPP_DIALECT >= 2011
|
| 32 |
+
# define THRUST_STD_COMPLEX_REAL(z) \
|
| 33 |
+
reinterpret_cast< \
|
| 34 |
+
const typename thrust::detail::remove_reference<decltype(z)>::type::value_type (&)[2] \
|
| 35 |
+
>(z)[0]
|
| 36 |
+
# define THRUST_STD_COMPLEX_IMAG(z) \
|
| 37 |
+
reinterpret_cast< \
|
| 38 |
+
const typename thrust::detail::remove_reference<decltype(z)>::type::value_type (&)[2] \
|
| 39 |
+
>(z)[1]
|
| 40 |
+
# define THRUST_STD_COMPLEX_DEVICE __device__
|
| 41 |
+
#else
|
| 42 |
+
# define THRUST_STD_COMPLEX_REAL(z) (z).real()
|
| 43 |
+
# define THRUST_STD_COMPLEX_IMAG(z) (z).imag()
|
| 44 |
+
# define THRUST_STD_COMPLEX_DEVICE
|
| 45 |
+
#endif
|
| 46 |
+
|
| 47 |
+
THRUST_NAMESPACE_BEGIN
|
| 48 |
+
|
| 49 |
+
/*
|
| 50 |
+
* Calls to the standard math library from inside the thrust namespace
|
| 51 |
+
* with real arguments require explicit scope otherwise they will fail
|
| 52 |
+
* to resolve as it will find the equivalent complex function but then
|
| 53 |
+
* fail to match the template, and give up looking for other scopes.
|
| 54 |
+
*/
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
/*! \addtogroup numerics
|
| 58 |
+
* \{
|
| 59 |
+
*/
|
| 60 |
+
|
| 61 |
+
/*! \addtogroup complex_numbers Complex Numbers
|
| 62 |
+
* \{
|
| 63 |
+
*/
|
| 64 |
+
|
| 65 |
+
/*! \cond
|
| 66 |
+
*/
|
| 67 |
+
|
| 68 |
+
namespace detail
|
| 69 |
+
{
|
| 70 |
+
|
| 71 |
+
template <typename T, std::size_t Align>
|
| 72 |
+
struct complex_storage;
|
| 73 |
+
|
| 74 |
+
#if THRUST_CPP_DIALECT >= 2011 \
|
| 75 |
+
&& (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) \
|
| 76 |
+
&& (THRUST_GCC_VERSION >= 40800)
|
| 77 |
+
// C++11 implementation, excluding GCC 4.7, which doesn't have `alignas`.
|
| 78 |
+
template <typename T, std::size_t Align>
|
| 79 |
+
struct complex_storage
|
| 80 |
+
{
|
| 81 |
+
struct alignas(Align) type { T x; T y; };
|
| 82 |
+
};
|
| 83 |
+
#elif (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC) \
|
| 84 |
+
|| ( (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) \
|
| 85 |
+
&& (THRUST_GCC_VERSION < 40600))
|
| 86 |
+
// C++03 implementation for MSVC and GCC <= 4.5.
|
| 87 |
+
//
|
| 88 |
+
// We have to implement `aligned_type` with specializations for MSVC
|
| 89 |
+
// and GCC 4.2 and older because they require literals as arguments to
|
| 90 |
+
// their alignment attribute.
|
| 91 |
+
|
| 92 |
+
#if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC)
|
| 93 |
+
// MSVC implementation.
|
| 94 |
+
#define THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(X) \
|
| 95 |
+
template <typename T> \
|
| 96 |
+
struct complex_storage<T, X> \
|
| 97 |
+
{ \
|
| 98 |
+
__declspec(align(X)) struct type { T x; T y; }; \
|
| 99 |
+
}; \
|
| 100 |
+
/**/
|
| 101 |
+
#else
|
| 102 |
+
// GCC <= 4.2 implementation.
|
| 103 |
+
#define THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(X) \
|
| 104 |
+
template <typename T> \
|
| 105 |
+
struct complex_storage<T, X> \
|
| 106 |
+
{ \
|
| 107 |
+
struct type { T x; T y; } __attribute__((aligned(X))); \
|
| 108 |
+
}; \
|
| 109 |
+
/**/
|
| 110 |
+
#endif
|
| 111 |
+
|
| 112 |
+
// The primary template is a fallback, which doesn't specify any alignment.
|
| 113 |
+
// It's only used when T is very large and we're using an older compilers
|
| 114 |
+
// which we have to fully specialize each alignment case.
|
| 115 |
+
template <typename T, std::size_t Align>
|
| 116 |
+
struct complex_storage
|
| 117 |
+
{
|
| 118 |
+
T x; T y;
|
| 119 |
+
};
|
| 120 |
+
|
| 121 |
+
THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(1);
|
| 122 |
+
THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(2);
|
| 123 |
+
THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(4);
|
| 124 |
+
THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(8);
|
| 125 |
+
THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(16);
|
| 126 |
+
THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(32);
|
| 127 |
+
THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(64);
|
| 128 |
+
THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION(128);
|
| 129 |
+
|
| 130 |
+
#undef THRUST_DEFINE_COMPLEX_STORAGE_SPECIALIZATION
|
| 131 |
+
#else
|
| 132 |
+
// C++03 implementation for GCC > 4.5, Clang, PGI, ICPC, and xlC.
|
| 133 |
+
template <typename T, std::size_t Align>
|
| 134 |
+
struct complex_storage
|
| 135 |
+
{
|
| 136 |
+
struct type { T x; T y; } __attribute__((aligned(Align)));
|
| 137 |
+
};
|
| 138 |
+
#endif
|
| 139 |
+
|
| 140 |
+
} // end namespace detail
|
| 141 |
+
|
| 142 |
+
/*! \endcond
|
| 143 |
+
*/
|
| 144 |
+
|
| 145 |
+
/*! \p complex is the Thrust equivalent to <tt>std::complex</tt>. It is
|
| 146 |
+
* functionally identical to it, but can also be used in device code which
|
| 147 |
+
* <tt>std::complex</tt> currently cannot.
|
| 148 |
+
*
|
| 149 |
+
* \tparam T The type used to hold the real and imaginary parts. Should be
|
| 150 |
+
* <tt>float</tt> or <tt>double</tt>. Others types are not supported.
|
| 151 |
+
*
|
| 152 |
+
*/
|
| 153 |
+
template <typename T>
|
| 154 |
+
struct complex
|
| 155 |
+
{
|
| 156 |
+
public:
|
| 157 |
+
|
| 158 |
+
/*! \p value_type is the type of \p complex's real and imaginary parts.
|
| 159 |
+
*/
|
| 160 |
+
typedef T value_type;
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
/* --- Constructors --- */
|
| 165 |
+
|
| 166 |
+
/*! Construct a complex number with an imaginary part of 0.
|
| 167 |
+
*
|
| 168 |
+
* \param re The real part of the number.
|
| 169 |
+
*/
|
| 170 |
+
__host__ __device__
|
| 171 |
+
complex(const T& re);
|
| 172 |
+
|
| 173 |
+
/*! Construct a complex number from its real and imaginary parts.
|
| 174 |
+
*
|
| 175 |
+
* \param re The real part of the number.
|
| 176 |
+
* \param im The imaginary part of the number.
|
| 177 |
+
*/
|
| 178 |
+
__host__ __device__
|
| 179 |
+
complex(const T& re, const T& im);
|
| 180 |
+
|
| 181 |
+
#if THRUST_CPP_DIALECT >= 2011
|
| 182 |
+
/*! Default construct a complex number.
|
| 183 |
+
*/
|
| 184 |
+
complex() = default;
|
| 185 |
+
|
| 186 |
+
/*! This copy constructor copies from a \p complex with a type that is
|
| 187 |
+
* convertible to this \p complex's \c value_type.
|
| 188 |
+
*
|
| 189 |
+
* \param z The \p complex to copy from.
|
| 190 |
+
*/
|
| 191 |
+
complex(const complex<T>& z) = default;
|
| 192 |
+
#else
|
| 193 |
+
/*! Default construct a complex number.
|
| 194 |
+
*/
|
| 195 |
+
__host__ __device__
|
| 196 |
+
complex();
|
| 197 |
+
|
| 198 |
+
/*! This copy constructor copies from a \p complex with a type that is
|
| 199 |
+
* convertible to this \p complex's \c value_type.
|
| 200 |
+
*
|
| 201 |
+
* \param z The \p complex to copy from.
|
| 202 |
+
*/
|
| 203 |
+
__host__ __device__
|
| 204 |
+
complex(const complex<T>& z);
|
| 205 |
+
#endif
|
| 206 |
+
|
| 207 |
+
/*! This converting copy constructor copies from a \p complex with a type
|
| 208 |
+
* that is convertible to this \p complex's \c value_type.
|
| 209 |
+
*
|
| 210 |
+
* \param z The \p complex to copy from.
|
| 211 |
+
*
|
| 212 |
+
* \tparam U is convertible to \c value_type.
|
| 213 |
+
*/
|
| 214 |
+
template <typename U>
|
| 215 |
+
__host__ __device__
|
| 216 |
+
complex(const complex<U>& z);
|
| 217 |
+
|
| 218 |
+
/*! This converting copy constructor copies from a <tt>std::complex</tt> with
|
| 219 |
+
* a type that is convertible to this \p complex's \c value_type.
|
| 220 |
+
*
|
| 221 |
+
* \param z The \p complex to copy from.
|
| 222 |
+
*/
|
| 223 |
+
__host__ THRUST_STD_COMPLEX_DEVICE
|
| 224 |
+
complex(const std::complex<T>& z);
|
| 225 |
+
|
| 226 |
+
/*! This converting copy constructor copies from a <tt>std::complex</tt> with
|
| 227 |
+
* a type that is convertible to this \p complex's \c value_type.
|
| 228 |
+
*
|
| 229 |
+
* \param z The \p complex to copy from.
|
| 230 |
+
*
|
| 231 |
+
* \tparam U is convertible to \c value_type.
|
| 232 |
+
*/
|
| 233 |
+
template <typename U>
|
| 234 |
+
__host__ THRUST_STD_COMPLEX_DEVICE
|
| 235 |
+
complex(const std::complex<U>& z);
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
/* --- Assignment Operators --- */
|
| 240 |
+
|
| 241 |
+
/*! Assign `re` to the real part of this \p complex and set the imaginary part
|
| 242 |
+
* to 0.
|
| 243 |
+
*
|
| 244 |
+
* \param re The real part of the number.
|
| 245 |
+
*/
|
| 246 |
+
__host__ __device__
|
| 247 |
+
complex& operator=(const T& re);
|
| 248 |
+
|
| 249 |
+
#if THRUST_CPP_DIALECT >= 2011
|
| 250 |
+
/*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this
|
| 251 |
+
* \p complex respectively.
|
| 252 |
+
*
|
| 253 |
+
* \param z The \p complex to copy from.
|
| 254 |
+
*/
|
| 255 |
+
complex& operator=(const complex<T>& z) = default;
|
| 256 |
+
#else
|
| 257 |
+
/*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this
|
| 258 |
+
* \p complex respectively.
|
| 259 |
+
*
|
| 260 |
+
* \param z The \p complex to copy from.
|
| 261 |
+
*/
|
| 262 |
+
__host__ __device__
|
| 263 |
+
complex& operator=(const complex<T>& z);
|
| 264 |
+
#endif
|
| 265 |
+
|
| 266 |
+
/*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this
|
| 267 |
+
* \p complex respectively.
|
| 268 |
+
*
|
| 269 |
+
* \param z The \p complex to copy from.
|
| 270 |
+
*
|
| 271 |
+
* \tparam U is convertible to \c value_type.
|
| 272 |
+
*/
|
| 273 |
+
template <typename U>
|
| 274 |
+
__host__ __device__
|
| 275 |
+
complex& operator=(const complex<U>& z);
|
| 276 |
+
|
| 277 |
+
/*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this
|
| 278 |
+
* \p complex respectively.
|
| 279 |
+
*
|
| 280 |
+
* \param z The \p complex to copy from.
|
| 281 |
+
*/
|
| 282 |
+
__host__ THRUST_STD_COMPLEX_DEVICE
|
| 283 |
+
complex& operator=(const std::complex<T>& z);
|
| 284 |
+
|
| 285 |
+
/*! Assign `z.real()` and `z.imag()` to the real and imaginary parts of this
|
| 286 |
+
* \p complex respectively.
|
| 287 |
+
*
|
| 288 |
+
* \param z The \p complex to copy from.
|
| 289 |
+
*
|
| 290 |
+
* \tparam U is convertible to \c value_type.
|
| 291 |
+
*/
|
| 292 |
+
template <typename U>
|
| 293 |
+
__host__ THRUST_STD_COMPLEX_DEVICE
|
| 294 |
+
complex& operator=(const std::complex<U>& z);
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
/* --- Compound Assignment Operators --- */
|
| 298 |
+
|
| 299 |
+
/*! Adds a \p complex to this \p complex and assigns the result to this
|
| 300 |
+
* \p complex.
|
| 301 |
+
*
|
| 302 |
+
* \param z The \p complex to be added.
|
| 303 |
+
*
|
| 304 |
+
* \tparam U is convertible to \c value_type.
|
| 305 |
+
*/
|
| 306 |
+
template <typename U>
|
| 307 |
+
__host__ __device__
|
| 308 |
+
complex<T>& operator+=(const complex<U>& z);
|
| 309 |
+
|
| 310 |
+
/*! Subtracts a \p complex from this \p complex and assigns the result to
|
| 311 |
+
* this \p complex.
|
| 312 |
+
*
|
| 313 |
+
* \param z The \p complex to be subtracted.
|
| 314 |
+
*
|
| 315 |
+
* \tparam U is convertible to \c value_type.
|
| 316 |
+
*/
|
| 317 |
+
template <typename U>
|
| 318 |
+
__host__ __device__
|
| 319 |
+
complex<T>& operator-=(const complex<U>& z);
|
| 320 |
+
|
| 321 |
+
/*! Multiplies this \p complex by another \p complex and assigns the result
|
| 322 |
+
* to this \p complex.
|
| 323 |
+
*
|
| 324 |
+
* \param z The \p complex to be multiplied.
|
| 325 |
+
*
|
| 326 |
+
* \tparam U is convertible to \c value_type.
|
| 327 |
+
*/
|
| 328 |
+
template <typename U>
|
| 329 |
+
__host__ __device__
|
| 330 |
+
complex<T>& operator*=(const complex<U>& z);
|
| 331 |
+
|
| 332 |
+
/*! Divides this \p complex by another \p complex and assigns the result to
|
| 333 |
+
* this \p complex.
|
| 334 |
+
*
|
| 335 |
+
* \param z The \p complex to be divided.
|
| 336 |
+
*
|
| 337 |
+
* \tparam U is convertible to \c value_type.
|
| 338 |
+
*/
|
| 339 |
+
template <typename U>
|
| 340 |
+
__host__ __device__
|
| 341 |
+
complex<T>& operator/=(const complex<U>& z);
|
| 342 |
+
|
| 343 |
+
/*! Adds a scalar to this \p complex and assigns the result to this
|
| 344 |
+
* \p complex.
|
| 345 |
+
*
|
| 346 |
+
* \param z The \p complex to be added.
|
| 347 |
+
*
|
| 348 |
+
* \tparam U is convertible to \c value_type.
|
| 349 |
+
*/
|
| 350 |
+
template <typename U>
|
| 351 |
+
__host__ __device__
|
| 352 |
+
complex<T>& operator+=(const U& z);
|
| 353 |
+
|
| 354 |
+
/*! Subtracts a scalar from this \p complex and assigns the result to
|
| 355 |
+
* this \p complex.
|
| 356 |
+
*
|
| 357 |
+
* \param z The scalar to be subtracted.
|
| 358 |
+
*
|
| 359 |
+
* \tparam U is convertible to \c value_type.
|
| 360 |
+
*/
|
| 361 |
+
template <typename U>
|
| 362 |
+
__host__ __device__
|
| 363 |
+
complex<T>& operator-=(const U& z);
|
| 364 |
+
|
| 365 |
+
/*! Multiplies this \p complex by a scalar and assigns the result
|
| 366 |
+
* to this \p complex.
|
| 367 |
+
*
|
| 368 |
+
* \param z The scalar to be multiplied.
|
| 369 |
+
*
|
| 370 |
+
* \tparam U is convertible to \c value_type.
|
| 371 |
+
*/
|
| 372 |
+
template <typename U>
|
| 373 |
+
__host__ __device__
|
| 374 |
+
complex<T>& operator*=(const U& z);
|
| 375 |
+
|
| 376 |
+
/*! Divides this \p complex by a scalar and assigns the result to
|
| 377 |
+
* this \p complex.
|
| 378 |
+
*
|
| 379 |
+
* \param z The scalar to be divided.
|
| 380 |
+
*
|
| 381 |
+
* \tparam U is convertible to \c value_type.
|
| 382 |
+
*/
|
| 383 |
+
template <typename U>
|
| 384 |
+
__host__ __device__
|
| 385 |
+
complex<T>& operator/=(const U& z);
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
/* --- Getter functions ---
|
| 390 |
+
* The volatile ones are there to help for example
|
| 391 |
+
* with certain reductions optimizations
|
| 392 |
+
*/
|
| 393 |
+
|
| 394 |
+
/*! Returns the real part of this \p complex.
|
| 395 |
+
*/
|
| 396 |
+
__host__ __device__
|
| 397 |
+
T real() const volatile { return data.x; }
|
| 398 |
+
|
| 399 |
+
/*! Returns the imaginary part of this \p complex.
|
| 400 |
+
*/
|
| 401 |
+
__host__ __device__
|
| 402 |
+
T imag() const volatile { return data.y; }
|
| 403 |
+
|
| 404 |
+
/*! Returns the real part of this \p complex.
|
| 405 |
+
*/
|
| 406 |
+
__host__ __device__
|
| 407 |
+
T real() const { return data.x; }
|
| 408 |
+
|
| 409 |
+
/*! Returns the imaginary part of this \p complex.
|
| 410 |
+
*/
|
| 411 |
+
__host__ __device__
|
| 412 |
+
T imag() const { return data.y; }
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
/* --- Setter functions ---
|
| 417 |
+
* The volatile ones are there to help for example
|
| 418 |
+
* with certain reductions optimizations
|
| 419 |
+
*/
|
| 420 |
+
|
| 421 |
+
/*! Sets the real part of this \p complex.
|
| 422 |
+
*
|
| 423 |
+
* \param re The new real part of this \p complex.
|
| 424 |
+
*/
|
| 425 |
+
__host__ __device__
|
| 426 |
+
void real(T re) volatile { data.x = re; }
|
| 427 |
+
|
| 428 |
+
/*! Sets the imaginary part of this \p complex.
|
| 429 |
+
*
|
| 430 |
+
* \param im The new imaginary part of this \p complex.e
|
| 431 |
+
*/
|
| 432 |
+
__host__ __device__
|
| 433 |
+
void imag(T im) volatile { data.y = im; }
|
| 434 |
+
|
| 435 |
+
/*! Sets the real part of this \p complex.
|
| 436 |
+
*
|
| 437 |
+
* \param re The new real part of this \p complex.
|
| 438 |
+
*/
|
| 439 |
+
__host__ __device__
|
| 440 |
+
void real(T re) { data.x = re; }
|
| 441 |
+
|
| 442 |
+
/*! Sets the imaginary part of this \p complex.
|
| 443 |
+
*
|
| 444 |
+
* \param im The new imaginary part of this \p complex.
|
| 445 |
+
*/
|
| 446 |
+
__host__ __device__
|
| 447 |
+
void imag(T im) { data.y = im; }
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
/* --- Casting functions --- */
|
| 452 |
+
|
| 453 |
+
/*! Casts this \p complex to a <tt>std::complex</tt> of the same type.
|
| 454 |
+
*/
|
| 455 |
+
__host__
|
| 456 |
+
operator std::complex<T>() const { return std::complex<T>(real(), imag()); }
|
| 457 |
+
|
| 458 |
+
private:
|
| 459 |
+
typename detail::complex_storage<T, sizeof(T) * 2>::type data;
|
| 460 |
+
};
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
/* --- General Functions --- */
|
| 464 |
+
|
| 465 |
+
/*! Returns the magnitude (also known as absolute value) of a \p complex.
|
| 466 |
+
*
|
| 467 |
+
* \param z The \p complex from which to calculate the absolute value.
|
| 468 |
+
*/
|
| 469 |
+
template<typename T>
|
| 470 |
+
__host__ __device__
|
| 471 |
+
T abs(const complex<T>& z);
|
| 472 |
+
|
| 473 |
+
/*! Returns the phase angle (also known as argument) in radians of a \p complex.
|
| 474 |
+
*
|
| 475 |
+
* \param z The \p complex from which to calculate the phase angle.
|
| 476 |
+
*/
|
| 477 |
+
template <typename T>
|
| 478 |
+
__host__ __device__
|
| 479 |
+
T arg(const complex<T>& z);
|
| 480 |
+
|
| 481 |
+
/*! Returns the square of the magnitude of a \p complex.
|
| 482 |
+
*
|
| 483 |
+
* \param z The \p complex from which to calculate the norm.
|
| 484 |
+
*/
|
| 485 |
+
template <typename T>
|
| 486 |
+
__host__ __device__
|
| 487 |
+
T norm(const complex<T>& z);
|
| 488 |
+
|
| 489 |
+
/*! Returns the complex conjugate of a \p complex.
|
| 490 |
+
*
|
| 491 |
+
* \param z The \p complex from which to calculate the complex conjugate.
|
| 492 |
+
*/
|
| 493 |
+
template <typename T>
|
| 494 |
+
__host__ __device__
|
| 495 |
+
complex<T> conj(const complex<T>& z);
|
| 496 |
+
|
| 497 |
+
/*! Returns a \p complex with the specified magnitude and phase.
|
| 498 |
+
*
|
| 499 |
+
* \param m The magnitude of the returned \p complex.
|
| 500 |
+
* \param theta The phase of the returned \p complex in radians.
|
| 501 |
+
*/
|
| 502 |
+
template <typename T0, typename T1>
|
| 503 |
+
__host__ __device__
|
| 504 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 505 |
+
polar(const T0& m, const T1& theta = T1());
|
| 506 |
+
|
| 507 |
+
/*! Returns the projection of a \p complex on the Riemann sphere.
|
| 508 |
+
* For all finite \p complex it returns the argument. For \p complexs
|
| 509 |
+
* with a non finite part returns (INFINITY,+/-0) where the sign of
|
| 510 |
+
* the zero matches the sign of the imaginary part of the argument.
|
| 511 |
+
*
|
| 512 |
+
* \param z The \p complex argument.
|
| 513 |
+
*/
|
| 514 |
+
template <typename T>
|
| 515 |
+
__host__ __device__
|
| 516 |
+
complex<T> proj(const T& z);
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
/* --- Binary Arithmetic operators --- */
|
| 521 |
+
|
| 522 |
+
/*! Adds two \p complex numbers.
|
| 523 |
+
*
|
| 524 |
+
* The value types of the two \p complex types should be compatible and the
|
| 525 |
+
* type of the returned \p complex is the promoted type of the two arguments.
|
| 526 |
+
*
|
| 527 |
+
* \param x The first \p complex.
|
| 528 |
+
* \param y The second \p complex.
|
| 529 |
+
*/
|
| 530 |
+
template <typename T0, typename T1>
|
| 531 |
+
__host__ __device__
|
| 532 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 533 |
+
operator+(const complex<T0>& x, const complex<T1>& y);
|
| 534 |
+
|
| 535 |
+
/*! Adds a scalar to a \p complex number.
|
| 536 |
+
*
|
| 537 |
+
* The value type of the \p complex should be compatible with the scalar and
|
| 538 |
+
* the type of the returned \p complex is the promoted type of the two arguments.
|
| 539 |
+
*
|
| 540 |
+
* \param x The \p complex.
|
| 541 |
+
* \param y The scalar.
|
| 542 |
+
*/
|
| 543 |
+
template <typename T0, typename T1>
|
| 544 |
+
__host__ __device__
|
| 545 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 546 |
+
operator+(const complex<T0>& x, const T1& y);
|
| 547 |
+
|
| 548 |
+
/*! Adds a \p complex number to a scalar.
|
| 549 |
+
*
|
| 550 |
+
* The value type of the \p complex should be compatible with the scalar and
|
| 551 |
+
* the type of the returned \p complex is the promoted type of the two arguments.
|
| 552 |
+
*
|
| 553 |
+
* \param x The scalar.
|
| 554 |
+
* \param y The \p complex.
|
| 555 |
+
*/
|
| 556 |
+
template <typename T0, typename T1>
|
| 557 |
+
__host__ __device__
|
| 558 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 559 |
+
operator+(const T0& x, const complex<T1>& y);
|
| 560 |
+
|
| 561 |
+
/*! Subtracts two \p complex numbers.
|
| 562 |
+
*
|
| 563 |
+
* The value types of the two \p complex types should be compatible and the
|
| 564 |
+
* type of the returned \p complex is the promoted type of the two arguments.
|
| 565 |
+
*
|
| 566 |
+
* \param x The first \p complex (minuend).
|
| 567 |
+
* \param y The second \p complex (subtrahend).
|
| 568 |
+
*/
|
| 569 |
+
template <typename T0, typename T1>
|
| 570 |
+
__host__ __device__
|
| 571 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 572 |
+
operator-(const complex<T0>& x, const complex<T1>& y);
|
| 573 |
+
|
| 574 |
+
/*! Subtracts a scalar from a \p complex number.
|
| 575 |
+
*
|
| 576 |
+
* The value type of the \p complex should be compatible with the scalar and
|
| 577 |
+
* the type of the returned \p complex is the promoted type of the two arguments.
|
| 578 |
+
*
|
| 579 |
+
* \param x The \p complex (minuend).
|
| 580 |
+
* \param y The scalar (subtrahend).
|
| 581 |
+
*/
|
| 582 |
+
template <typename T0, typename T1>
|
| 583 |
+
__host__ __device__
|
| 584 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 585 |
+
operator-(const complex<T0>& x, const T1& y);
|
| 586 |
+
|
| 587 |
+
/*! Subtracts a \p complex number from a scalar.
|
| 588 |
+
*
|
| 589 |
+
* The value type of the \p complex should be compatible with the scalar and
|
| 590 |
+
* the type of the returned \p complex is the promoted type of the two arguments.
|
| 591 |
+
*
|
| 592 |
+
* \param x The scalar (minuend).
|
| 593 |
+
* \param y The \p complex (subtrahend).
|
| 594 |
+
*/
|
| 595 |
+
template <typename T0, typename T1>
|
| 596 |
+
__host__ __device__
|
| 597 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 598 |
+
operator-(const T0& x, const complex<T1>& y);
|
| 599 |
+
|
| 600 |
+
/*! Multiplies two \p complex numbers.
|
| 601 |
+
*
|
| 602 |
+
* The value types of the two \p complex types should be compatible and the
|
| 603 |
+
* type of the returned \p complex is the promoted type of the two arguments.
|
| 604 |
+
*
|
| 605 |
+
* \param x The first \p complex.
|
| 606 |
+
* \param y The second \p complex.
|
| 607 |
+
*/
|
| 608 |
+
template <typename T0, typename T1>
|
| 609 |
+
__host__ __device__
|
| 610 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 611 |
+
operator*(const complex<T0>& x, const complex<T1>& y);
|
| 612 |
+
|
| 613 |
+
/*! Multiplies a \p complex number by a scalar.
|
| 614 |
+
*
|
| 615 |
+
* \param x The \p complex.
|
| 616 |
+
* \param y The scalar.
|
| 617 |
+
*/
|
| 618 |
+
template <typename T0, typename T1>
|
| 619 |
+
__host__ __device__
|
| 620 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 621 |
+
operator*(const complex<T0>& x, const T1& y);
|
| 622 |
+
|
| 623 |
+
/*! Multiplies a scalar by a \p complex number.
|
| 624 |
+
*
|
| 625 |
+
* The value type of the \p complex should be compatible with the scalar and
|
| 626 |
+
* the type of the returned \p complex is the promoted type of the two arguments.
|
| 627 |
+
*
|
| 628 |
+
* \param x The scalar.
|
| 629 |
+
* \param y The \p complex.
|
| 630 |
+
*/
|
| 631 |
+
template <typename T0, typename T1>
|
| 632 |
+
__host__ __device__
|
| 633 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 634 |
+
operator*(const T0& x, const complex<T1>& y);
|
| 635 |
+
|
| 636 |
+
/*! Divides two \p complex numbers.
|
| 637 |
+
*
|
| 638 |
+
* The value types of the two \p complex types should be compatible and the
|
| 639 |
+
* type of the returned \p complex is the promoted type of the two arguments.
|
| 640 |
+
*
|
| 641 |
+
* \param x The numerator (dividend).
|
| 642 |
+
* \param y The denomimator (divisor).
|
| 643 |
+
*/
|
| 644 |
+
template <typename T0, typename T1>
|
| 645 |
+
__host__ __device__
|
| 646 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 647 |
+
operator/(const complex<T0>& x, const complex<T1>& y);
|
| 648 |
+
|
| 649 |
+
/*! Divides a \p complex number by a scalar.
|
| 650 |
+
*
|
| 651 |
+
* The value type of the \p complex should be compatible with the scalar and
|
| 652 |
+
* the type of the returned \p complex is the promoted type of the two arguments.
|
| 653 |
+
*
|
| 654 |
+
* \param x The complex numerator (dividend).
|
| 655 |
+
* \param y The scalar denomimator (divisor).
|
| 656 |
+
*/
|
| 657 |
+
template <typename T0, typename T1>
|
| 658 |
+
__host__ __device__
|
| 659 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 660 |
+
operator/(const complex<T0>& x, const T1& y);
|
| 661 |
+
|
| 662 |
+
/*! Divides a scalar by a \p complex number.
|
| 663 |
+
*
|
| 664 |
+
* The value type of the \p complex should be compatible with the scalar and
|
| 665 |
+
* the type of the returned \p complex is the promoted type of the two arguments.
|
| 666 |
+
*
|
| 667 |
+
* \param x The scalar numerator (dividend).
|
| 668 |
+
* \param y The complex denomimator (divisor).
|
| 669 |
+
*/
|
| 670 |
+
template <typename T0, typename T1>
|
| 671 |
+
__host__ __device__
|
| 672 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 673 |
+
operator/(const T0& x, const complex<T1>& y);
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
/* --- Unary Arithmetic operators --- */
|
| 678 |
+
|
| 679 |
+
/*! Unary plus, returns its \p complex argument.
|
| 680 |
+
*
|
| 681 |
+
* \param y The \p complex argument.
|
| 682 |
+
*/
|
| 683 |
+
template <typename T>
|
| 684 |
+
__host__ __device__
|
| 685 |
+
complex<T>
|
| 686 |
+
operator+(const complex<T>& y);
|
| 687 |
+
|
| 688 |
+
/*! Unary minus, returns the additive inverse (negation) of its \p complex
|
| 689 |
+
* argument.
|
| 690 |
+
*
|
| 691 |
+
* \param y The \p complex argument.
|
| 692 |
+
*/
|
| 693 |
+
template <typename T>
|
| 694 |
+
__host__ __device__
|
| 695 |
+
complex<T>
|
| 696 |
+
operator-(const complex<T>& y);
|
| 697 |
+
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
/* --- Exponential Functions --- */
|
| 701 |
+
|
| 702 |
+
/*! Returns the complex exponential of a \p complex number.
|
| 703 |
+
*
|
| 704 |
+
* \param z The \p complex argument.
|
| 705 |
+
*/
|
| 706 |
+
template <typename T>
|
| 707 |
+
__host__ __device__
|
| 708 |
+
complex<T> exp(const complex<T>& z);
|
| 709 |
+
|
| 710 |
+
/*! Returns the complex natural logarithm of a \p complex number.
|
| 711 |
+
*
|
| 712 |
+
* \param z The \p complex argument.
|
| 713 |
+
*/
|
| 714 |
+
template <typename T>
|
| 715 |
+
__host__ __device__
|
| 716 |
+
complex<T> log(const complex<T>& z);
|
| 717 |
+
|
| 718 |
+
/*! Returns the complex base 10 logarithm of a \p complex number.
|
| 719 |
+
*
|
| 720 |
+
* \param z The \p complex argument.
|
| 721 |
+
*/
|
| 722 |
+
template <typename T>
|
| 723 |
+
__host__ __device__
|
| 724 |
+
complex<T> log10(const complex<T>& z);
|
| 725 |
+
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
/* --- Power Functions --- */
|
| 729 |
+
|
| 730 |
+
/*! Returns a \p complex number raised to another.
|
| 731 |
+
*
|
| 732 |
+
* The value types of the two \p complex types should be compatible and the
|
| 733 |
+
* type of the returned \p complex is the promoted type of the two arguments.
|
| 734 |
+
*
|
| 735 |
+
* \param x The base.
|
| 736 |
+
* \param y The exponent.
|
| 737 |
+
*/
|
| 738 |
+
template <typename T0, typename T1>
|
| 739 |
+
__host__ __device__
|
| 740 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 741 |
+
pow(const complex<T0>& x, const complex<T1>& y);
|
| 742 |
+
|
| 743 |
+
/*! Returns a \p complex number raised to a scalar.
|
| 744 |
+
*
|
| 745 |
+
* The value type of the \p complex should be compatible with the scalar and
|
| 746 |
+
* the type of the returned \p complex is the promoted type of the two arguments.
|
| 747 |
+
*
|
| 748 |
+
* \param x The base.
|
| 749 |
+
* \param y The exponent.
|
| 750 |
+
*/
|
| 751 |
+
template <typename T0, typename T1>
|
| 752 |
+
__host__ __device__
|
| 753 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 754 |
+
pow(const complex<T0>& x, const T1& y);
|
| 755 |
+
|
| 756 |
+
/*! Returns a scalar raised to a \p complex number.
|
| 757 |
+
*
|
| 758 |
+
* The value type of the \p complex should be compatible with the scalar and
|
| 759 |
+
* the type of the returned \p complex is the promoted type of the two arguments.
|
| 760 |
+
*
|
| 761 |
+
* \param x The base.
|
| 762 |
+
* \param y The exponent.
|
| 763 |
+
*/
|
| 764 |
+
template <typename T0, typename T1>
|
| 765 |
+
__host__ __device__
|
| 766 |
+
complex<typename detail::promoted_numerical_type<T0, T1>::type>
|
| 767 |
+
pow(const T0& x, const complex<T1>& y);
|
| 768 |
+
|
| 769 |
+
/*! Returns the complex square root of a \p complex number.
|
| 770 |
+
*
|
| 771 |
+
* \param z The \p complex argument.
|
| 772 |
+
*/
|
| 773 |
+
template <typename T>
|
| 774 |
+
__host__ __device__
|
| 775 |
+
complex<T> sqrt(const complex<T>& z);
|
| 776 |
+
|
| 777 |
+
|
| 778 |
+
/* --- Trigonometric Functions --- */
|
| 779 |
+
|
| 780 |
+
/*! Returns the complex cosine of a \p complex number.
|
| 781 |
+
*
|
| 782 |
+
* \param z The \p complex argument.
|
| 783 |
+
*/
|
| 784 |
+
template <typename T>
|
| 785 |
+
__host__ __device__
|
| 786 |
+
complex<T> cos(const complex<T>& z);
|
| 787 |
+
|
| 788 |
+
/*! Returns the complex sine of a \p complex number.
|
| 789 |
+
*
|
| 790 |
+
* \param z The \p complex argument.
|
| 791 |
+
*/
|
| 792 |
+
template <typename T>
|
| 793 |
+
__host__ __device__
|
| 794 |
+
complex<T> sin(const complex<T>& z);
|
| 795 |
+
|
| 796 |
+
/*! Returns the complex tangent of a \p complex number.
|
| 797 |
+
*
|
| 798 |
+
* \param z The \p complex argument.
|
| 799 |
+
*/
|
| 800 |
+
template <typename T>
|
| 801 |
+
__host__ __device__
|
| 802 |
+
complex<T> tan(const complex<T>& z);
|
| 803 |
+
|
| 804 |
+
|
| 805 |
+
|
| 806 |
+
/* --- Hyperbolic Functions --- */
|
| 807 |
+
|
| 808 |
+
/*! Returns the complex hyperbolic cosine of a \p complex number.
|
| 809 |
+
*
|
| 810 |
+
* \param z The \p complex argument.
|
| 811 |
+
*/
|
| 812 |
+
template <typename T>
|
| 813 |
+
__host__ __device__
|
| 814 |
+
complex<T> cosh(const complex<T>& z);
|
| 815 |
+
|
| 816 |
+
/*! Returns the complex hyperbolic sine of a \p complex number.
|
| 817 |
+
*
|
| 818 |
+
* \param z The \p complex argument.
|
| 819 |
+
*/
|
| 820 |
+
template <typename T>
|
| 821 |
+
__host__ __device__
|
| 822 |
+
complex<T> sinh(const complex<T>& z);
|
| 823 |
+
|
| 824 |
+
/*! Returns the complex hyperbolic tangent of a \p complex number.
|
| 825 |
+
*
|
| 826 |
+
* \param z The \p complex argument.
|
| 827 |
+
*/
|
| 828 |
+
template <typename T>
|
| 829 |
+
__host__ __device__
|
| 830 |
+
complex<T> tanh(const complex<T>& z);
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
|
| 834 |
+
/* --- Inverse Trigonometric Functions --- */
|
| 835 |
+
|
| 836 |
+
/*! Returns the complex arc cosine of a \p complex number.
|
| 837 |
+
*
|
| 838 |
+
* The range of the real part of the result is [0, Pi] and
|
| 839 |
+
* the range of the imaginary part is [-inf, +inf]
|
| 840 |
+
*
|
| 841 |
+
* \param z The \p complex argument.
|
| 842 |
+
*/
|
| 843 |
+
template <typename T>
|
| 844 |
+
__host__ __device__
|
| 845 |
+
complex<T> acos(const complex<T>& z);
|
| 846 |
+
|
| 847 |
+
/*! Returns the complex arc sine of a \p complex number.
|
| 848 |
+
*
|
| 849 |
+
* The range of the real part of the result is [-Pi/2, Pi/2] and
|
| 850 |
+
* the range of the imaginary part is [-inf, +inf]
|
| 851 |
+
*
|
| 852 |
+
* \param z The \p complex argument.
|
| 853 |
+
*/
|
| 854 |
+
template <typename T>
|
| 855 |
+
__host__ __device__
|
| 856 |
+
complex<T> asin(const complex<T>& z);
|
| 857 |
+
|
| 858 |
+
/*! Returns the complex arc tangent of a \p complex number.
|
| 859 |
+
*
|
| 860 |
+
* The range of the real part of the result is [-Pi/2, Pi/2] and
|
| 861 |
+
* the range of the imaginary part is [-inf, +inf]
|
| 862 |
+
*
|
| 863 |
+
* \param z The \p complex argument.
|
| 864 |
+
*/
|
| 865 |
+
template <typename T>
|
| 866 |
+
__host__ __device__
|
| 867 |
+
complex<T> atan(const complex<T>& z);
|
| 868 |
+
|
| 869 |
+
|
| 870 |
+
|
| 871 |
+
/* --- Inverse Hyperbolic Functions --- */
|
| 872 |
+
|
| 873 |
+
/*! Returns the complex inverse hyperbolic cosine of a \p complex number.
|
| 874 |
+
*
|
| 875 |
+
* The range of the real part of the result is [0, +inf] and
|
| 876 |
+
* the range of the imaginary part is [-Pi, Pi]
|
| 877 |
+
*
|
| 878 |
+
* \param z The \p complex argument.
|
| 879 |
+
*/
|
| 880 |
+
template <typename T>
|
| 881 |
+
__host__ __device__
|
| 882 |
+
complex<T> acosh(const complex<T>& z);
|
| 883 |
+
|
| 884 |
+
/*! Returns the complex inverse hyperbolic sine of a \p complex number.
|
| 885 |
+
*
|
| 886 |
+
* The range of the real part of the result is [-inf, +inf] and
|
| 887 |
+
* the range of the imaginary part is [-Pi/2, Pi/2]
|
| 888 |
+
*
|
| 889 |
+
* \param z The \p complex argument.
|
| 890 |
+
*/
|
| 891 |
+
template <typename T>
|
| 892 |
+
__host__ __device__
|
| 893 |
+
complex<T> asinh(const complex<T>& z);
|
| 894 |
+
|
| 895 |
+
/*! Returns the complex inverse hyperbolic tangent of a \p complex number.
|
| 896 |
+
*
|
| 897 |
+
* The range of the real part of the result is [-inf, +inf] and
|
| 898 |
+
* the range of the imaginary part is [-Pi/2, Pi/2]
|
| 899 |
+
*
|
| 900 |
+
* \param z The \p complex argument.
|
| 901 |
+
*/
|
| 902 |
+
template <typename T>
|
| 903 |
+
__host__ __device__
|
| 904 |
+
complex<T> atanh(const complex<T>& z);
|
| 905 |
+
|
| 906 |
+
|
| 907 |
+
|
| 908 |
+
/* --- Stream Operators --- */
|
| 909 |
+
|
| 910 |
+
/*! Writes to an output stream a \p complex number in the form (real, imaginary).
|
| 911 |
+
*
|
| 912 |
+
* \param os The output stream.
|
| 913 |
+
* \param z The \p complex number to output.
|
| 914 |
+
*/
|
| 915 |
+
template <typename T, typename CharT, typename Traits>
|
| 916 |
+
std::basic_ostream<CharT, Traits>&
|
| 917 |
+
operator<<(std::basic_ostream<CharT, Traits>& os, const complex<T>& z);
|
| 918 |
+
|
| 919 |
+
/*! Reads a \p complex number from an input stream.
|
| 920 |
+
*
|
| 921 |
+
* The recognized formats are:
|
| 922 |
+
* - real
|
| 923 |
+
* - (real)
|
| 924 |
+
* - (real, imaginary)
|
| 925 |
+
*
|
| 926 |
+
* The values read must be convertible to the \p complex's \c value_type
|
| 927 |
+
*
|
| 928 |
+
* \param is The input stream.
|
| 929 |
+
* \param z The \p complex number to set.
|
| 930 |
+
*/
|
| 931 |
+
template <typename T, typename CharT, typename Traits>
|
| 932 |
+
__host__
|
| 933 |
+
std::basic_istream<CharT, Traits>&
|
| 934 |
+
operator>>(std::basic_istream<CharT, Traits>& is, complex<T>& z);
|
| 935 |
+
|
| 936 |
+
|
| 937 |
+
|
| 938 |
+
/* --- Equality Operators --- */
|
| 939 |
+
|
| 940 |
+
/*! Returns true if two \p complex numbers are equal and false otherwise.
|
| 941 |
+
*
|
| 942 |
+
* \param x The first \p complex.
|
| 943 |
+
* \param y The second \p complex.
|
| 944 |
+
*/
|
| 945 |
+
template <typename T0, typename T1>
|
| 946 |
+
__host__ __device__
|
| 947 |
+
bool operator==(const complex<T0>& x, const complex<T1>& y);
|
| 948 |
+
|
| 949 |
+
/*! Returns true if two \p complex numbers are equal and false otherwise.
|
| 950 |
+
*
|
| 951 |
+
* \param x The first \p complex.
|
| 952 |
+
* \param y The second \p complex.
|
| 953 |
+
*/
|
| 954 |
+
template <typename T0, typename T1>
|
| 955 |
+
__host__ THRUST_STD_COMPLEX_DEVICE
|
| 956 |
+
bool operator==(const complex<T0>& x, const std::complex<T1>& y);
|
| 957 |
+
|
| 958 |
+
/*! Returns true if two \p complex numbers are equal and false otherwise.
|
| 959 |
+
*
|
| 960 |
+
* \param x The first \p complex.
|
| 961 |
+
* \param y The second \p complex.
|
| 962 |
+
*/
|
| 963 |
+
template <typename T0, typename T1>
|
| 964 |
+
__host__ THRUST_STD_COMPLEX_DEVICE
|
| 965 |
+
bool operator==(const std::complex<T0>& x, const complex<T1>& y);
|
| 966 |
+
|
| 967 |
+
/*! Returns true if the imaginary part of the \p complex number is zero and
|
| 968 |
+
* the real part is equal to the scalar. Returns false otherwise.
|
| 969 |
+
*
|
| 970 |
+
* \param x The scalar.
|
| 971 |
+
* \param y The \p complex.
|
| 972 |
+
*/
|
| 973 |
+
template <typename T0, typename T1>
|
| 974 |
+
__host__ __device__
|
| 975 |
+
bool operator==(const T0& x, const complex<T1>& y);
|
| 976 |
+
|
| 977 |
+
/*! Returns true if the imaginary part of the \p complex number is zero and
|
| 978 |
+
* the real part is equal to the scalar. Returns false otherwise.
|
| 979 |
+
*
|
| 980 |
+
* \param x The \p complex.
|
| 981 |
+
* \param y The scalar.
|
| 982 |
+
*/
|
| 983 |
+
template <typename T0, typename T1>
|
| 984 |
+
__host__ __device__
|
| 985 |
+
bool operator==(const complex<T0>& x, const T1& y);
|
| 986 |
+
|
| 987 |
+
/*! Returns true if two \p complex numbers are different and false otherwise.
|
| 988 |
+
*
|
| 989 |
+
* \param x The first \p complex.
|
| 990 |
+
* \param y The second \p complex.
|
| 991 |
+
*/
|
| 992 |
+
template <typename T0, typename T1>
|
| 993 |
+
__host__ __device__
|
| 994 |
+
bool operator!=(const complex<T0>& x, const complex<T1>& y);
|
| 995 |
+
|
| 996 |
+
/*! Returns true if two \p complex numbers are different and false otherwise.
|
| 997 |
+
*
|
| 998 |
+
* \param x The first \p complex.
|
| 999 |
+
* \param y The second \p complex.
|
| 1000 |
+
*/
|
| 1001 |
+
template <typename T0, typename T1>
|
| 1002 |
+
__host__ THRUST_STD_COMPLEX_DEVICE
|
| 1003 |
+
bool operator!=(const complex<T0>& x, const std::complex<T1>& y);
|
| 1004 |
+
|
| 1005 |
+
/*! Returns true if two \p complex numbers are different and false otherwise.
|
| 1006 |
+
*
|
| 1007 |
+
* \param x The first \p complex.
|
| 1008 |
+
* \param y The second \p complex.
|
| 1009 |
+
*/
|
| 1010 |
+
template <typename T0, typename T1>
|
| 1011 |
+
__host__ THRUST_STD_COMPLEX_DEVICE
|
| 1012 |
+
bool operator!=(const std::complex<T0>& x, const complex<T1>& y);
|
| 1013 |
+
|
| 1014 |
+
/*! Returns true if the imaginary part of the \p complex number is not zero or
|
| 1015 |
+
* the real part is different from the scalar. Returns false otherwise.
|
| 1016 |
+
*
|
| 1017 |
+
* \param x The scalar.
|
| 1018 |
+
* \param y The \p complex.
|
| 1019 |
+
*/
|
| 1020 |
+
template <typename T0, typename T1>
|
| 1021 |
+
__host__ __device__
|
| 1022 |
+
bool operator!=(const T0& x, const complex<T1>& y);
|
| 1023 |
+
|
| 1024 |
+
/*! Returns true if the imaginary part of the \p complex number is not zero or
|
| 1025 |
+
* the real part is different from the scalar. Returns false otherwise.
|
| 1026 |
+
*
|
| 1027 |
+
* \param x The \p complex.
|
| 1028 |
+
* \param y The scalar.
|
| 1029 |
+
*/
|
| 1030 |
+
template <typename T0, typename T1>
|
| 1031 |
+
__host__ __device__
|
| 1032 |
+
bool operator!=(const complex<T0>& x, const T1& y);
|
| 1033 |
+
|
| 1034 |
+
THRUST_NAMESPACE_END
|
| 1035 |
+
|
| 1036 |
+
#include <thrust/detail/complex/complex.inl>
|
| 1037 |
+
|
| 1038 |
+
#undef THRUST_STD_COMPLEX_REAL
|
| 1039 |
+
#undef THRUST_STD_COMPLEX_IMAG
|
| 1040 |
+
#undef THRUST_STD_COMPLEX_DEVICE
|
| 1041 |
+
|
| 1042 |
+
/*! \} // complex_numbers
|
| 1043 |
+
*/
|
| 1044 |
+
|
| 1045 |
+
/*! \} // numerics
|
| 1046 |
+
*/
|
| 1047 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/count.h
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
/*! \file count.h
|
| 19 |
+
* \brief Counting elements in a range
|
| 20 |
+
*/
|
| 21 |
+
|
| 22 |
+
#pragma once
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/config.h>
|
| 25 |
+
#include <thrust/detail/execution_policy.h>
|
| 26 |
+
#include <thrust/iterator/iterator_traits.h>
|
| 27 |
+
|
| 28 |
+
THRUST_NAMESPACE_BEGIN
|
| 29 |
+
|
| 30 |
+
/*! \addtogroup algorithms
|
| 31 |
+
*/
|
| 32 |
+
|
| 33 |
+
/*! \addtogroup reductions
|
| 34 |
+
* \ingroup algorithms
|
| 35 |
+
* \{
|
| 36 |
+
*/
|
| 37 |
+
|
| 38 |
+
/*! \addtogroup counting
|
| 39 |
+
* \ingroup reductions
|
| 40 |
+
* \{
|
| 41 |
+
*/
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
/*! \p count finds the number of elements in <tt>[first,last)</tt> that are equal
|
| 45 |
+
* to \p value. More precisely, \p count returns the number of iterators \c i in
|
| 46 |
+
* <tt>[first, last)</tt> such that <tt>*i == value</tt>.
|
| 47 |
+
*
|
| 48 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 49 |
+
*
|
| 50 |
+
* \param exec The execution policy to use for parallelization.
|
| 51 |
+
* \param first The beginning of the sequence.
|
| 52 |
+
* \param last The end of the sequence.
|
| 53 |
+
* \param value The value to be counted.
|
| 54 |
+
* \return The number of elements equal to \p value.
|
| 55 |
+
*
|
| 56 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 57 |
+
* \tparam InputIterator must be a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a> and \c InputIterator's \c value_type must be a model of must be a model of <a href="https://en.cppreference.com/w/cpp/concepts/equality_comparable">Equality Comparable</a>.
|
| 58 |
+
* \tparam EqualityComparable must be a model of <a href="https://en.cppreference.com/w/cpp/concepts/equality_comparable">Equality Comparable</a> and can be compared for equality with \c InputIterator's \c value_type
|
| 59 |
+
*
|
| 60 |
+
* The following code snippet demonstrates how to use \p count to
|
| 61 |
+
* count the number of instances in a range of a value of interest using the \p thrust::device execution policy:
|
| 62 |
+
*
|
| 63 |
+
* \code
|
| 64 |
+
* #include <thrust/count.h>
|
| 65 |
+
* #include <thrust/device_vector.h>
|
| 66 |
+
* #include <thrust/execution_policy.h>
|
| 67 |
+
* ...
|
| 68 |
+
* // put 3 1s in a device_vector
|
| 69 |
+
* thrust::device_vector<int> vec(5,0);
|
| 70 |
+
* vec[1] = 1;
|
| 71 |
+
* vec[3] = 1;
|
| 72 |
+
* vec[4] = 1;
|
| 73 |
+
*
|
| 74 |
+
* // count the 1s
|
| 75 |
+
* int result = thrust::count(thrust::device, vec.begin(), vec.end(), 1);
|
| 76 |
+
* // result == 3
|
| 77 |
+
* \endcode
|
| 78 |
+
*
|
| 79 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/count
|
| 80 |
+
*/
|
| 81 |
+
template<typename DerivedPolicy, typename InputIterator, typename EqualityComparable>
|
| 82 |
+
__host__ __device__
|
| 83 |
+
typename thrust::iterator_traits<InputIterator>::difference_type
|
| 84 |
+
count(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, InputIterator first, InputIterator last, const EqualityComparable& value);
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
/*! \p count finds the number of elements in <tt>[first,last)</tt> that are equal
|
| 89 |
+
* to \p value. More precisely, \p count returns the number of iterators \c i in
|
| 90 |
+
* <tt>[first, last)</tt> such that <tt>*i == value</tt>.
|
| 91 |
+
*
|
| 92 |
+
* \param first The beginning of the sequence.
|
| 93 |
+
* \param last The end of the sequence.
|
| 94 |
+
* \param value The value to be counted.
|
| 95 |
+
* \return The number of elements equal to \p value.
|
| 96 |
+
*
|
| 97 |
+
* \tparam InputIterator must be a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a> and \c InputIterator's \c value_type must be a model of must be a model of <a href="https://en.cppreference.com/w/cpp/concepts/equality_comparable">Equality Comparable</a>.
|
| 98 |
+
* \tparam EqualityComparable must be a model of <a href="https://en.cppreference.com/w/cpp/concepts/equality_comparable">Equality Comparable</a> and can be compared for equality with \c InputIterator's \c value_type
|
| 99 |
+
*
|
| 100 |
+
* The following code snippet demonstrates how to use \p count to
|
| 101 |
+
* count the number of instances in a range of a value of interest.
|
| 102 |
+
* \code
|
| 103 |
+
* #include <thrust/count.h>
|
| 104 |
+
* #include <thrust/device_vector.h>
|
| 105 |
+
* ...
|
| 106 |
+
* // put 3 1s in a device_vector
|
| 107 |
+
* thrust::device_vector<int> vec(5,0);
|
| 108 |
+
* vec[1] = 1;
|
| 109 |
+
* vec[3] = 1;
|
| 110 |
+
* vec[4] = 1;
|
| 111 |
+
*
|
| 112 |
+
* // count the 1s
|
| 113 |
+
* int result = thrust::count(vec.begin(), vec.end(), 1);
|
| 114 |
+
* // result == 3
|
| 115 |
+
* \endcode
|
| 116 |
+
*
|
| 117 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/count
|
| 118 |
+
*/
|
| 119 |
+
template <typename InputIterator, typename EqualityComparable>
|
| 120 |
+
typename thrust::iterator_traits<InputIterator>::difference_type
|
| 121 |
+
count(InputIterator first, InputIterator last, const EqualityComparable& value);
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
/*! \p count_if finds the number of elements in <tt>[first,last)</tt> for which
|
| 125 |
+
* a predicate is \c true. More precisely, \p count_if returns the number of iterators
|
| 126 |
+
* \c i in <tt>[first, last)</tt> such that <tt>pred(*i) == true</tt>.
|
| 127 |
+
*
|
| 128 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 129 |
+
*
|
| 130 |
+
* \param exec The execution policy to use for parallelization.
|
| 131 |
+
* \param first The beginning of the sequence.
|
| 132 |
+
* \param last The end of the sequence.
|
| 133 |
+
* \param pred The predicate.
|
| 134 |
+
* \return The number of elements where \p pred is \c true.
|
| 135 |
+
*
|
| 136 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 137 |
+
* \tparam InputIterator must be a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a> and \c InputIterator's \c value_type must be convertible to \c Predicate's \c argument_type.
|
| 138 |
+
* \tparam Predicate must be a model of <a href="https://en.cppreference.com/w/cpp/concepts/predicate">Predicate</a>.
|
| 139 |
+
*
|
| 140 |
+
* The following code snippet demonstrates how to use \p count to
|
| 141 |
+
* count the number of odd numbers in a range using the \p thrust::device execution policy:
|
| 142 |
+
*
|
| 143 |
+
* \code
|
| 144 |
+
* #include <thrust/count.h>
|
| 145 |
+
* #include <thrust/device_vector.h>
|
| 146 |
+
* #include <thrust/execution_policy.h>
|
| 147 |
+
* ...
|
| 148 |
+
* struct is_odd
|
| 149 |
+
* {
|
| 150 |
+
* __host__ __device__
|
| 151 |
+
* bool operator()(int &x)
|
| 152 |
+
* {
|
| 153 |
+
* return x & 1;
|
| 154 |
+
* }
|
| 155 |
+
* };
|
| 156 |
+
* ...
|
| 157 |
+
* // fill a device_vector with even & odd numbers
|
| 158 |
+
* thrust::device_vector<int> vec(5);
|
| 159 |
+
* vec[0] = 0;
|
| 160 |
+
* vec[1] = 1;
|
| 161 |
+
* vec[2] = 2;
|
| 162 |
+
* vec[3] = 3;
|
| 163 |
+
* vec[4] = 4;
|
| 164 |
+
*
|
| 165 |
+
* // count the odd elements in vec
|
| 166 |
+
* int result = thrust::count_if(thrust::device, vec.begin(), vec.end(), is_odd());
|
| 167 |
+
* // result == 2
|
| 168 |
+
* \endcode
|
| 169 |
+
*
|
| 170 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/count
|
| 171 |
+
*/
|
| 172 |
+
template<typename DerivedPolicy, typename InputIterator, typename Predicate>
|
| 173 |
+
__host__ __device__
|
| 174 |
+
typename thrust::iterator_traits<InputIterator>::difference_type
|
| 175 |
+
count_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, InputIterator first, InputIterator last, Predicate pred);
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
/*! \p count_if finds the number of elements in <tt>[first,last)</tt> for which
|
| 179 |
+
* a predicate is \c true. More precisely, \p count_if returns the number of iterators
|
| 180 |
+
* \c i in <tt>[first, last)</tt> such that <tt>pred(*i) == true</tt>.
|
| 181 |
+
*
|
| 182 |
+
* \param first The beginning of the sequence.
|
| 183 |
+
* \param last The end of the sequence.
|
| 184 |
+
* \param pred The predicate.
|
| 185 |
+
* \return The number of elements where \p pred is \c true.
|
| 186 |
+
*
|
| 187 |
+
* \tparam InputIterator must be a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a> and \c InputIterator's \c value_type must be convertible to \c Predicate's \c argument_type.
|
| 188 |
+
* \tparam Predicate must be a model of <a href="https://en.cppreference.com/w/cpp/concepts/predicate">Predicate</a>.
|
| 189 |
+
*
|
| 190 |
+
* The following code snippet demonstrates how to use \p count to
|
| 191 |
+
* count the number of odd numbers in a range.
|
| 192 |
+
* \code
|
| 193 |
+
* #include <thrust/count.h>
|
| 194 |
+
* #include <thrust/device_vector.h>
|
| 195 |
+
* ...
|
| 196 |
+
* struct is_odd
|
| 197 |
+
* {
|
| 198 |
+
* __host__ __device__
|
| 199 |
+
* bool operator()(int &x)
|
| 200 |
+
* {
|
| 201 |
+
* return x & 1;
|
| 202 |
+
* }
|
| 203 |
+
* };
|
| 204 |
+
* ...
|
| 205 |
+
* // fill a device_vector with even & odd numbers
|
| 206 |
+
* thrust::device_vector<int> vec(5);
|
| 207 |
+
* vec[0] = 0;
|
| 208 |
+
* vec[1] = 1;
|
| 209 |
+
* vec[2] = 2;
|
| 210 |
+
* vec[3] = 3;
|
| 211 |
+
* vec[4] = 4;
|
| 212 |
+
*
|
| 213 |
+
* // count the odd elements in vec
|
| 214 |
+
* int result = thrust::count_if(vec.begin(), vec.end(), is_odd());
|
| 215 |
+
* // result == 2
|
| 216 |
+
* \endcode
|
| 217 |
+
*
|
| 218 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/count
|
| 219 |
+
*/
|
| 220 |
+
template <typename InputIterator, typename Predicate>
|
| 221 |
+
typename thrust::iterator_traits<InputIterator>::difference_type
|
| 222 |
+
count_if(InputIterator first, InputIterator last, Predicate pred);
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
/*! \} // end counting
|
| 226 |
+
* \} // end reductions
|
| 227 |
+
*/
|
| 228 |
+
|
| 229 |
+
THRUST_NAMESPACE_END
|
| 230 |
+
|
| 231 |
+
#include <thrust/detail/count.h>
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/select_system.h
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2018 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
#pragma once
|
| 18 |
+
|
| 19 |
+
#include <thrust/detail/config.h>
|
| 20 |
+
#include <thrust/detail/cpp11_required.h>
|
| 21 |
+
|
| 22 |
+
#if THRUST_CPP_DIALECT >= 2011
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/type_deduction.h>
|
| 25 |
+
#include <thrust/type_traits/remove_cvref.h>
|
| 26 |
+
#include <thrust/system/detail/generic/select_system.h>
|
| 27 |
+
|
| 28 |
+
THRUST_NAMESPACE_BEGIN
|
| 29 |
+
|
| 30 |
+
namespace detail
|
| 31 |
+
{
|
| 32 |
+
|
| 33 |
+
// We need a way to compute the return type of `select_system`, which is found
|
| 34 |
+
// by using `thrust::system::detail::generic::select_system` and then making an
|
| 35 |
+
// ADL call. We have no trait that defines the return type. With the
|
| 36 |
+
// limitations of C++11 return type deduction, we need to be able to stick all
|
| 37 |
+
// of that into `decltype`. So, we put the using statement into a detail
|
| 38 |
+
// namespace, and then implement the generic dispatch function in that
|
| 39 |
+
// namespace.
|
| 40 |
+
|
| 41 |
+
namespace select_system_detail
|
| 42 |
+
{
|
| 43 |
+
|
| 44 |
+
using thrust::system::detail::generic::select_system;
|
| 45 |
+
|
| 46 |
+
struct select_system_fn final
|
| 47 |
+
{
|
| 48 |
+
__thrust_exec_check_disable__
|
| 49 |
+
template <typename DerivedPolicy0>
|
| 50 |
+
__host__ __device__
|
| 51 |
+
auto operator()(
|
| 52 |
+
thrust::detail::execution_policy_base<DerivedPolicy0> const& exec0
|
| 53 |
+
) const
|
| 54 |
+
THRUST_DECLTYPE_RETURNS(
|
| 55 |
+
select_system(
|
| 56 |
+
thrust::detail::derived_cast(thrust::detail::strip_const(exec0))
|
| 57 |
+
)
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
__thrust_exec_check_disable__
|
| 61 |
+
template <typename DerivedPolicy0, typename DerivedPolicy1>
|
| 62 |
+
__host__ __device__
|
| 63 |
+
auto operator()(
|
| 64 |
+
thrust::detail::execution_policy_base<DerivedPolicy0> const& exec0
|
| 65 |
+
, thrust::detail::execution_policy_base<DerivedPolicy1> const& exec1
|
| 66 |
+
) const
|
| 67 |
+
THRUST_DECLTYPE_RETURNS(
|
| 68 |
+
select_system(
|
| 69 |
+
thrust::detail::derived_cast(thrust::detail::strip_const(exec0))
|
| 70 |
+
, thrust::detail::derived_cast(thrust::detail::strip_const(exec1))
|
| 71 |
+
)
|
| 72 |
+
)
|
| 73 |
+
};
|
| 74 |
+
|
| 75 |
+
} // namespace select_system_detail
|
| 76 |
+
|
| 77 |
+
THRUST_INLINE_CONSTANT select_system_detail::select_system_fn select_system{};
|
| 78 |
+
|
| 79 |
+
} // detail
|
| 80 |
+
|
| 81 |
+
THRUST_NAMESPACE_END
|
| 82 |
+
|
| 83 |
+
#endif // THRUST_CPP_DIALECT >= 2011
|
| 84 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/detail/tuple_meta_transform.h
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
#pragma once
|
| 18 |
+
|
| 19 |
+
#include <thrust/detail/config.h>
|
| 20 |
+
|
| 21 |
+
#include <thrust/tuple.h>
|
| 22 |
+
#include <thrust/type_traits/integer_sequence.h>
|
| 23 |
+
|
| 24 |
+
THRUST_NAMESPACE_BEGIN
|
| 25 |
+
|
| 26 |
+
namespace detail
|
| 27 |
+
{
|
| 28 |
+
|
| 29 |
+
// introduce an intermediate type tuple_meta_transform_WAR_NVCC
|
| 30 |
+
// rather than directly specializing tuple_meta_transform with
|
| 31 |
+
// default argument IndexSequence = thrust::make_index_sequence<thrust::tuple_size<Tuple>::value>
|
| 32 |
+
// to workaround nvcc 11.0 compiler bug
|
| 33 |
+
template<typename Tuple,
|
| 34 |
+
template<typename> class UnaryMetaFunction,
|
| 35 |
+
typename IndexSequence>
|
| 36 |
+
struct tuple_meta_transform_WAR_NVCC;
|
| 37 |
+
|
| 38 |
+
template<typename Tuple,
|
| 39 |
+
template<typename> class UnaryMetaFunction,
|
| 40 |
+
size_t... Is>
|
| 41 |
+
struct tuple_meta_transform_WAR_NVCC<Tuple, UnaryMetaFunction, thrust::index_sequence<Is...>>
|
| 42 |
+
{
|
| 43 |
+
typedef thrust::tuple<
|
| 44 |
+
typename UnaryMetaFunction<typename thrust::tuple_element<Is,Tuple>::type>::type...
|
| 45 |
+
> type;
|
| 46 |
+
};
|
| 47 |
+
|
| 48 |
+
template<typename Tuple,
|
| 49 |
+
template<typename> class UnaryMetaFunction>
|
| 50 |
+
struct tuple_meta_transform
|
| 51 |
+
{
|
| 52 |
+
typedef typename tuple_meta_transform_WAR_NVCC<Tuple, UnaryMetaFunction, thrust::make_index_sequence<thrust::tuple_size<Tuple>::value>>::type type;
|
| 53 |
+
};
|
| 54 |
+
|
| 55 |
+
} // end detail
|
| 56 |
+
|
| 57 |
+
THRUST_NAMESPACE_END
|
| 58 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_free.h
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
/*! \file
|
| 18 |
+
* \brief Deallocates storage allocated by \p device_malloc.
|
| 19 |
+
*/
|
| 20 |
+
|
| 21 |
+
#pragma once
|
| 22 |
+
|
| 23 |
+
#include <thrust/detail/config.h>
|
| 24 |
+
#include <thrust/device_ptr.h>
|
| 25 |
+
|
| 26 |
+
THRUST_NAMESPACE_BEGIN
|
| 27 |
+
|
| 28 |
+
/*! \addtogroup memory_management Memory Management
|
| 29 |
+
* \{
|
| 30 |
+
*/
|
| 31 |
+
|
| 32 |
+
/*! \p device_free deallocates memory allocated by the function \p device_malloc.
|
| 33 |
+
*
|
| 34 |
+
* \param ptr A \p device_ptr pointing to memory to be deallocated.
|
| 35 |
+
*
|
| 36 |
+
* The following code snippet demonstrates how to use \p device_free to
|
| 37 |
+
* deallocate memory allocated by \p device_malloc.
|
| 38 |
+
*
|
| 39 |
+
* \code
|
| 40 |
+
* #include <thrust/device_malloc.h>
|
| 41 |
+
* #include <thrust/device_free.h>
|
| 42 |
+
* ...
|
| 43 |
+
* // allocate some integers with device_malloc
|
| 44 |
+
* const int N = 100;
|
| 45 |
+
* thrust::device_ptr<int> int_array = thrust::device_malloc<int>(N);
|
| 46 |
+
*
|
| 47 |
+
* // manipulate integers
|
| 48 |
+
* ...
|
| 49 |
+
*
|
| 50 |
+
* // deallocate with device_free
|
| 51 |
+
* thrust::device_free(int_array);
|
| 52 |
+
* \endcode
|
| 53 |
+
*
|
| 54 |
+
* \see device_ptr
|
| 55 |
+
* \see device_malloc
|
| 56 |
+
*/
|
| 57 |
+
inline void device_free(thrust::device_ptr<void> ptr);
|
| 58 |
+
|
| 59 |
+
/*! \} // memory_management
|
| 60 |
+
*/
|
| 61 |
+
|
| 62 |
+
THRUST_NAMESPACE_END
|
| 63 |
+
|
| 64 |
+
#include <thrust/detail/device_free.inl>
|
| 65 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_make_unique.h
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2018 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
/*! \file device_make_unique.h
|
| 19 |
+
* \brief A factory function for creating `unique_ptr`s to device objects.
|
| 20 |
+
*/
|
| 21 |
+
|
| 22 |
+
#pragma once
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/config.h>
|
| 25 |
+
#include <thrust/detail/cpp11_required.h>
|
| 26 |
+
|
| 27 |
+
#if THRUST_CPP_DIALECT >= 2011
|
| 28 |
+
|
| 29 |
+
#include <thrust/allocate_unique.h>
|
| 30 |
+
#include <thrust/device_new.h>
|
| 31 |
+
#include <thrust/device_ptr.h>
|
| 32 |
+
#include <thrust/device_allocator.h>
|
| 33 |
+
#include <thrust/detail/type_deduction.h>
|
| 34 |
+
|
| 35 |
+
THRUST_NAMESPACE_BEGIN
|
| 36 |
+
|
| 37 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 38 |
+
|
| 39 |
+
template <typename T, typename... Args>
|
| 40 |
+
__host__
|
| 41 |
+
auto device_make_unique(Args&&... args)
|
| 42 |
+
THRUST_TRAILING_RETURN(decltype(
|
| 43 |
+
uninitialized_allocate_unique<T>(device_allocator<T>{})
|
| 44 |
+
))
|
| 45 |
+
{
|
| 46 |
+
#if !defined(THRUST_DOXYGEN) // This causes Doxygen to choke for some reason.
|
| 47 |
+
// FIXME: This is crude - we construct an unnecessary T on the host for
|
| 48 |
+
// `device_new`. We need a proper dispatched `construct` algorithm to
|
| 49 |
+
// do this properly.
|
| 50 |
+
auto p = uninitialized_allocate_unique<T>(device_allocator<T>{});
|
| 51 |
+
device_new<T>(p.get(), T(THRUST_FWD(args)...));
|
| 52 |
+
return p;
|
| 53 |
+
#endif
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 57 |
+
|
| 58 |
+
THRUST_NAMESPACE_END
|
| 59 |
+
|
| 60 |
+
#endif // THRUST_CPP_DIALECT >= 2011
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_malloc.h
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
/*! \file
|
| 18 |
+
* \brief Allocates storage in device memory.
|
| 19 |
+
*/
|
| 20 |
+
|
| 21 |
+
#pragma once
|
| 22 |
+
|
| 23 |
+
#include <thrust/detail/config.h>
|
| 24 |
+
#include <thrust/device_ptr.h>
|
| 25 |
+
#include <cstddef> // for std::size_t
|
| 26 |
+
|
| 27 |
+
THRUST_NAMESPACE_BEGIN
|
| 28 |
+
|
| 29 |
+
/*! \addtogroup memory_management Memory Management
|
| 30 |
+
* \{
|
| 31 |
+
*/
|
| 32 |
+
|
| 33 |
+
/*! This version of \p device_malloc allocates sequential device storage
|
| 34 |
+
* for bytes.
|
| 35 |
+
*
|
| 36 |
+
* \param n The number of bytes to allocate sequentially
|
| 37 |
+
* in device memory.
|
| 38 |
+
* \return A \p device_ptr to the newly allocated memory.
|
| 39 |
+
*
|
| 40 |
+
* The following code snippet demonstrates how to use \p device_malloc to
|
| 41 |
+
* allocate a range of device memory.
|
| 42 |
+
*
|
| 43 |
+
* \code
|
| 44 |
+
* #include <thrust/device_malloc.h>
|
| 45 |
+
* #include <thrust/device_free.h>
|
| 46 |
+
* ...
|
| 47 |
+
* // allocate some memory with device_malloc
|
| 48 |
+
* const int N = 100;
|
| 49 |
+
* thrust::device_ptr<void> void_ptr = thrust::device_malloc(N);
|
| 50 |
+
*
|
| 51 |
+
* // manipulate memory
|
| 52 |
+
* ...
|
| 53 |
+
*
|
| 54 |
+
* // deallocate with device_free
|
| 55 |
+
* thrust::device_free(void_ptr);
|
| 56 |
+
* \endcode
|
| 57 |
+
*
|
| 58 |
+
* \see device_ptr
|
| 59 |
+
* \see device_free
|
| 60 |
+
*/
|
| 61 |
+
inline thrust::device_ptr<void> device_malloc(const std::size_t n);
|
| 62 |
+
|
| 63 |
+
/*! This version of \p device_malloc allocates sequential device storage for
|
| 64 |
+
* new objects of the given type.
|
| 65 |
+
*
|
| 66 |
+
* \param n The number of objects of type T to allocate
|
| 67 |
+
* sequentially in device memory.
|
| 68 |
+
* \return A \p device_ptr to the newly allocated memory.
|
| 69 |
+
*
|
| 70 |
+
* The following code snippet demonstrates how to use \p device_malloc to
|
| 71 |
+
* allocate a range of device memory.
|
| 72 |
+
*
|
| 73 |
+
* \code
|
| 74 |
+
* #include <thrust/device_malloc.h>
|
| 75 |
+
* #include <thrust/device_free.h>
|
| 76 |
+
* ...
|
| 77 |
+
* // allocate some integers with device_malloc
|
| 78 |
+
* const int N = 100;
|
| 79 |
+
* thrust::device_ptr<int> int_array = thrust::device_malloc<int>(N);
|
| 80 |
+
*
|
| 81 |
+
* // manipulate integers
|
| 82 |
+
* ...
|
| 83 |
+
*
|
| 84 |
+
* // deallocate with device_free
|
| 85 |
+
* thrust::device_free(int_array);
|
| 86 |
+
* \endcode
|
| 87 |
+
*
|
| 88 |
+
* \see device_ptr
|
| 89 |
+
* \see device_free
|
| 90 |
+
*/
|
| 91 |
+
template<typename T>
|
| 92 |
+
inline thrust::device_ptr<T> device_malloc(const std::size_t n);
|
| 93 |
+
|
| 94 |
+
/*! \} // memory_management
|
| 95 |
+
*/
|
| 96 |
+
|
| 97 |
+
THRUST_NAMESPACE_END
|
| 98 |
+
|
| 99 |
+
#include <thrust/detail/device_malloc.inl>
|
| 100 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_malloc_allocator.h
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2018 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
/*! \file
|
| 18 |
+
* \brief An allocator which allocates storage with \p device_malloc.
|
| 19 |
+
*/
|
| 20 |
+
|
| 21 |
+
#pragma once
|
| 22 |
+
|
| 23 |
+
#include <thrust/detail/config.h>
|
| 24 |
+
#include <thrust/device_ptr.h>
|
| 25 |
+
#include <thrust/device_reference.h>
|
| 26 |
+
#include <thrust/device_malloc.h>
|
| 27 |
+
#include <thrust/device_free.h>
|
| 28 |
+
#include <limits>
|
| 29 |
+
#include <stdexcept>
|
| 30 |
+
|
| 31 |
+
THRUST_NAMESPACE_BEGIN
|
| 32 |
+
|
| 33 |
+
// forward declarations to WAR circular #includes
|
| 34 |
+
template<typename> class device_ptr;
|
| 35 |
+
template<typename T> device_ptr<T> device_malloc(const std::size_t n);
|
| 36 |
+
|
| 37 |
+
/*! \addtogroup allocators Allocators
|
| 38 |
+
* \ingroup memory_management
|
| 39 |
+
* \{
|
| 40 |
+
*/
|
| 41 |
+
|
| 42 |
+
/*! \p device_malloc_allocator is a device memory allocator that employs the
|
| 43 |
+
* \p device_malloc function for allocation.
|
| 44 |
+
*
|
| 45 |
+
* \p device_malloc_allocator is deprecated in favor of <tt>thrust::mr</tt>
|
| 46 |
+
* memory resource-based allocators.
|
| 47 |
+
*
|
| 48 |
+
* \see device_malloc
|
| 49 |
+
* \see device_ptr
|
| 50 |
+
* \see device_allocator
|
| 51 |
+
* \see https://en.cppreference.com/w/cpp/memory/allocator
|
| 52 |
+
*/
|
| 53 |
+
template<typename T>
|
| 54 |
+
class device_malloc_allocator
|
| 55 |
+
{
|
| 56 |
+
public:
|
| 57 |
+
/*! Type of element allocated, \c T. */
|
| 58 |
+
typedef T value_type;
|
| 59 |
+
|
| 60 |
+
/*! Pointer to allocation, \c device_ptr<T>. */
|
| 61 |
+
typedef device_ptr<T> pointer;
|
| 62 |
+
|
| 63 |
+
/*! \c const pointer to allocation, \c device_ptr<const T>. */
|
| 64 |
+
typedef device_ptr<const T> const_pointer;
|
| 65 |
+
|
| 66 |
+
/*! Reference to allocated element, \c device_reference<T>. */
|
| 67 |
+
typedef device_reference<T> reference;
|
| 68 |
+
|
| 69 |
+
/*! \c const reference to allocated element, \c device_reference<const T>. */
|
| 70 |
+
typedef device_reference<const T> const_reference;
|
| 71 |
+
|
| 72 |
+
/*! Type of allocation size, \c std::size_t. */
|
| 73 |
+
typedef std::size_t size_type;
|
| 74 |
+
|
| 75 |
+
/*! Type of allocation difference, \c pointer::difference_type. */
|
| 76 |
+
typedef typename pointer::difference_type difference_type;
|
| 77 |
+
|
| 78 |
+
/*! The \p rebind metafunction provides the type of a \p device_malloc_allocator
|
| 79 |
+
* instantiated with another type.
|
| 80 |
+
*
|
| 81 |
+
* \tparam U The other type to use for instantiation.
|
| 82 |
+
*/
|
| 83 |
+
template<typename U>
|
| 84 |
+
struct rebind
|
| 85 |
+
{
|
| 86 |
+
/*! The typedef \p other gives the type of the rebound \p device_malloc_allocator.
|
| 87 |
+
*/
|
| 88 |
+
typedef device_malloc_allocator<U> other;
|
| 89 |
+
}; // end rebind
|
| 90 |
+
|
| 91 |
+
/*! No-argument constructor has no effect. */
|
| 92 |
+
__host__ __device__
|
| 93 |
+
inline device_malloc_allocator() {}
|
| 94 |
+
|
| 95 |
+
/*! No-argument destructor has no effect. */
|
| 96 |
+
__host__ __device__
|
| 97 |
+
inline ~device_malloc_allocator() {}
|
| 98 |
+
|
| 99 |
+
/*! Copy constructor has no effect. */
|
| 100 |
+
__host__ __device__
|
| 101 |
+
inline device_malloc_allocator(device_malloc_allocator const&) {}
|
| 102 |
+
|
| 103 |
+
/*! Constructor from other \p device_malloc_allocator has no effect. */
|
| 104 |
+
template<typename U>
|
| 105 |
+
__host__ __device__
|
| 106 |
+
inline device_malloc_allocator(device_malloc_allocator<U> const&) {}
|
| 107 |
+
|
| 108 |
+
#if THRUST_CPP_DIALECT >= 2011
|
| 109 |
+
device_malloc_allocator & operator=(const device_malloc_allocator &) = default;
|
| 110 |
+
#endif
|
| 111 |
+
|
| 112 |
+
/*! Returns the address of an allocated object.
|
| 113 |
+
* \return <tt>&r</tt>.
|
| 114 |
+
*/
|
| 115 |
+
__host__ __device__
|
| 116 |
+
inline pointer address(reference r) { return &r; }
|
| 117 |
+
|
| 118 |
+
/*! Returns the address an allocated object.
|
| 119 |
+
* \return <tt>&r</tt>.
|
| 120 |
+
*/
|
| 121 |
+
__host__ __device__
|
| 122 |
+
inline const_pointer address(const_reference r) { return &r; }
|
| 123 |
+
|
| 124 |
+
/*! Allocates storage for \p cnt objects.
|
| 125 |
+
* \param cnt The number of objects to allocate.
|
| 126 |
+
* \return A \p pointer to uninitialized storage for \p cnt objects.
|
| 127 |
+
* \note Memory allocated by this function must be deallocated with \p deallocate.
|
| 128 |
+
*/
|
| 129 |
+
__host__
|
| 130 |
+
inline pointer allocate(size_type cnt,
|
| 131 |
+
const_pointer = const_pointer(static_cast<T*>(0)))
|
| 132 |
+
{
|
| 133 |
+
if(cnt > this->max_size())
|
| 134 |
+
{
|
| 135 |
+
throw std::bad_alloc();
|
| 136 |
+
} // end if
|
| 137 |
+
|
| 138 |
+
return pointer(device_malloc<T>(cnt));
|
| 139 |
+
} // end allocate()
|
| 140 |
+
|
| 141 |
+
/*! Deallocates storage for objects allocated with \p allocate.
|
| 142 |
+
* \param p A \p pointer to the storage to deallocate.
|
| 143 |
+
* \param cnt The size of the previous allocation.
|
| 144 |
+
* \note Memory deallocated by this function must previously have been
|
| 145 |
+
* allocated with \p allocate.
|
| 146 |
+
*/
|
| 147 |
+
__host__
|
| 148 |
+
inline void deallocate(pointer p, size_type cnt)
|
| 149 |
+
{
|
| 150 |
+
// silence unused parameter warning while still leaving the parameter name for Doxygen
|
| 151 |
+
(void)(cnt);
|
| 152 |
+
|
| 153 |
+
device_free(p);
|
| 154 |
+
} // end deallocate()
|
| 155 |
+
|
| 156 |
+
/*! Returns the largest value \c n for which <tt>allocate(n)</tt> might succeed.
|
| 157 |
+
* \return The largest value \c n for which <tt>allocate(n)</tt> might succeed.
|
| 158 |
+
*/
|
| 159 |
+
inline size_type max_size() const
|
| 160 |
+
{
|
| 161 |
+
return (std::numeric_limits<size_type>::max)() / sizeof(T);
|
| 162 |
+
} // end max_size()
|
| 163 |
+
|
| 164 |
+
/*! Compares against another \p device_malloc_allocator for equality.
|
| 165 |
+
* \return \c true
|
| 166 |
+
*/
|
| 167 |
+
__host__ __device__
|
| 168 |
+
inline bool operator==(device_malloc_allocator const&) const { return true; }
|
| 169 |
+
|
| 170 |
+
/*! Compares against another \p device_malloc_allocator for inequality.
|
| 171 |
+
* \return \c false
|
| 172 |
+
*/
|
| 173 |
+
__host__ __device__
|
| 174 |
+
inline bool operator!=(device_malloc_allocator const &a) const {return !operator==(a); }
|
| 175 |
+
}; // end device_malloc_allocator
|
| 176 |
+
|
| 177 |
+
/*! \} // allocators
|
| 178 |
+
*/
|
| 179 |
+
|
| 180 |
+
THRUST_NAMESPACE_END
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_new.h
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
/*! \file device_new.h
|
| 19 |
+
* \brief Constructs new elements in device memory
|
| 20 |
+
*/
|
| 21 |
+
|
| 22 |
+
#pragma once
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/config.h>
|
| 25 |
+
|
| 26 |
+
// #include this for size_t
|
| 27 |
+
#include <cstddef>
|
| 28 |
+
#include <thrust/device_ptr.h>
|
| 29 |
+
|
| 30 |
+
THRUST_NAMESPACE_BEGIN
|
| 31 |
+
|
| 32 |
+
/*!
|
| 33 |
+
* \addtogroup memory_management Memory Management
|
| 34 |
+
* \{
|
| 35 |
+
*/
|
| 36 |
+
|
| 37 |
+
/*! \p device_new implements the placement \c new operator for types
|
| 38 |
+
* resident in device memory. \p device_new calls <tt>T</tt>'s null
|
| 39 |
+
* constructor on a array of objects in device memory.
|
| 40 |
+
* No memory is allocated by this function.
|
| 41 |
+
*
|
| 42 |
+
* \param p A \p device_ptr to a region of device memory into which
|
| 43 |
+
* to construct one or many <tt>T</tt>s.
|
| 44 |
+
* \param n The number of objects to construct at \p p.
|
| 45 |
+
* \return p, casted to <tt>T</tt>'s type.
|
| 46 |
+
*
|
| 47 |
+
* \see device_ptr
|
| 48 |
+
*/
|
| 49 |
+
template <typename T>
|
| 50 |
+
device_ptr<T> device_new(device_ptr<void> p,
|
| 51 |
+
const size_t n = 1);
|
| 52 |
+
|
| 53 |
+
/*! \p device_new implements the placement new operator for types
|
| 54 |
+
* resident in device memory. \p device_new calls <tt>T</tt>'s copy
|
| 55 |
+
* constructor on a array of objects in device memory. No memory is
|
| 56 |
+
* allocated by this function.
|
| 57 |
+
*
|
| 58 |
+
* \param p A \p device_ptr to a region of device memory into which to
|
| 59 |
+
* construct one or many <tt>T</tt>s.
|
| 60 |
+
* \param exemplar The value from which to copy.
|
| 61 |
+
* \param n The number of objects to construct at \p p.
|
| 62 |
+
* \return p, casted to <tt>T</tt>'s type.
|
| 63 |
+
*
|
| 64 |
+
* \see device_ptr
|
| 65 |
+
* \see fill
|
| 66 |
+
*/
|
| 67 |
+
template <typename T>
|
| 68 |
+
device_ptr<T> device_new(device_ptr<void> p,
|
| 69 |
+
const T &exemplar,
|
| 70 |
+
const size_t n = 1);
|
| 71 |
+
|
| 72 |
+
/*! \p device_new implements the new operator for types resident in device memory.
|
| 73 |
+
* It allocates device memory large enough to hold \p n new objects of type \c T.
|
| 74 |
+
*
|
| 75 |
+
* \param n The number of objects to allocate. Defaults to \c 1.
|
| 76 |
+
* \return A \p device_ptr to the newly allocated region of device memory.
|
| 77 |
+
*/
|
| 78 |
+
template <typename T>
|
| 79 |
+
device_ptr<T> device_new(const size_t n = 1);
|
| 80 |
+
|
| 81 |
+
/*! \} // memory_management
|
| 82 |
+
*/
|
| 83 |
+
|
| 84 |
+
THRUST_NAMESPACE_END
|
| 85 |
+
|
| 86 |
+
#include <thrust/detail/device_new.inl>
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_new_allocator.h
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
/*! \file
|
| 18 |
+
* \brief An allocator which allocates storage with \p device_new.
|
| 19 |
+
*/
|
| 20 |
+
|
| 21 |
+
#pragma once
|
| 22 |
+
|
| 23 |
+
#include <thrust/detail/config.h>
|
| 24 |
+
#include <thrust/device_ptr.h>
|
| 25 |
+
#include <thrust/device_reference.h>
|
| 26 |
+
#include <thrust/device_new.h>
|
| 27 |
+
#include <thrust/device_delete.h>
|
| 28 |
+
|
| 29 |
+
#include <cuda/std/cstdint>
|
| 30 |
+
#include <cuda/std/limits>
|
| 31 |
+
|
| 32 |
+
#include <stdexcept>
|
| 33 |
+
|
| 34 |
+
THRUST_NAMESPACE_BEGIN
|
| 35 |
+
|
| 36 |
+
/*! \addtogroup allocators Allocators
|
| 37 |
+
* \ingroup memory_management
|
| 38 |
+
* \{
|
| 39 |
+
*/
|
| 40 |
+
|
| 41 |
+
/*! \p device_new_allocator is a device memory allocator that employs the
|
| 42 |
+
* \p device_new function for allocation.
|
| 43 |
+
*
|
| 44 |
+
* \see device_new
|
| 45 |
+
* \see device_ptr
|
| 46 |
+
* \see https://en.cppreference.com/w/cpp/memory/allocator
|
| 47 |
+
*/
|
| 48 |
+
template<typename T>
|
| 49 |
+
class device_new_allocator
|
| 50 |
+
{
|
| 51 |
+
public:
|
| 52 |
+
/*! Type of element allocated, \c T. */
|
| 53 |
+
typedef T value_type;
|
| 54 |
+
|
| 55 |
+
/*! Pointer to allocation, \c device_ptr<T>. */
|
| 56 |
+
typedef device_ptr<T> pointer;
|
| 57 |
+
|
| 58 |
+
/*! \c const pointer to allocation, \c device_ptr<const T>. */
|
| 59 |
+
typedef device_ptr<const T> const_pointer;
|
| 60 |
+
|
| 61 |
+
/*! Reference to allocated element, \c device_reference<T>. */
|
| 62 |
+
typedef device_reference<T> reference;
|
| 63 |
+
|
| 64 |
+
/*! \c const reference to allocated element, \c device_reference<const T>. */
|
| 65 |
+
typedef device_reference<const T> const_reference;
|
| 66 |
+
|
| 67 |
+
/*! Type of allocation size, \c ::cuda::std::size_t. */
|
| 68 |
+
typedef ::cuda::std::size_t size_type;
|
| 69 |
+
|
| 70 |
+
/*! Type of allocation difference, \c pointer::difference_type. */
|
| 71 |
+
typedef typename pointer::difference_type difference_type;
|
| 72 |
+
|
| 73 |
+
/*! The \p rebind metafunction provides the type of a \p device_new_allocator
|
| 74 |
+
* instantiated with another type.
|
| 75 |
+
*
|
| 76 |
+
* \tparam U The other type to use for instantiation.
|
| 77 |
+
*/
|
| 78 |
+
template<typename U>
|
| 79 |
+
struct rebind
|
| 80 |
+
{
|
| 81 |
+
/*! The typedef \p other gives the type of the rebound \p device_new_allocator.
|
| 82 |
+
*/
|
| 83 |
+
typedef device_new_allocator<U> other;
|
| 84 |
+
}; // end rebind
|
| 85 |
+
|
| 86 |
+
/*! No-argument constructor has no effect. */
|
| 87 |
+
__host__ __device__
|
| 88 |
+
inline device_new_allocator() {}
|
| 89 |
+
|
| 90 |
+
/*! No-argument destructor has no effect. */
|
| 91 |
+
__host__ __device__
|
| 92 |
+
inline ~device_new_allocator() {}
|
| 93 |
+
|
| 94 |
+
/*! Copy constructor has no effect. */
|
| 95 |
+
__host__ __device__
|
| 96 |
+
inline device_new_allocator(device_new_allocator const&) {}
|
| 97 |
+
|
| 98 |
+
/*! Constructor from other \p device_malloc_allocator has no effect. */
|
| 99 |
+
template<typename U>
|
| 100 |
+
__host__ __device__
|
| 101 |
+
inline device_new_allocator(device_new_allocator<U> const&) {}
|
| 102 |
+
|
| 103 |
+
/*! Returns the address of an allocated object.
|
| 104 |
+
* \return <tt>&r</tt>.
|
| 105 |
+
*/
|
| 106 |
+
__host__ __device__
|
| 107 |
+
inline pointer address(reference r) { return &r; }
|
| 108 |
+
|
| 109 |
+
/*! Returns the address an allocated object.
|
| 110 |
+
* \return <tt>&r</tt>.
|
| 111 |
+
*/
|
| 112 |
+
__host__ __device__
|
| 113 |
+
inline const_pointer address(const_reference r) { return &r; }
|
| 114 |
+
|
| 115 |
+
/*! Allocates storage for \p cnt objects.
|
| 116 |
+
* \param cnt The number of objects to allocate.
|
| 117 |
+
* \return A \p pointer to uninitialized storage for \p cnt objects.
|
| 118 |
+
* \note Memory allocated by this function must be deallocated with \p deallocate.
|
| 119 |
+
*/
|
| 120 |
+
__host__
|
| 121 |
+
inline pointer allocate(size_type cnt,
|
| 122 |
+
const_pointer = const_pointer(static_cast<T*>(0)))
|
| 123 |
+
{
|
| 124 |
+
if(cnt > this->max_size())
|
| 125 |
+
{
|
| 126 |
+
throw std::bad_alloc();
|
| 127 |
+
} // end if
|
| 128 |
+
|
| 129 |
+
// use "::operator new" rather than keyword new
|
| 130 |
+
return pointer(device_new<T>(cnt));
|
| 131 |
+
} // end allocate()
|
| 132 |
+
|
| 133 |
+
/*! Deallocates storage for objects allocated with \p allocate.
|
| 134 |
+
* \param p A \p pointer to the storage to deallocate.
|
| 135 |
+
* \param cnt The size of the previous allocation.
|
| 136 |
+
* \note Memory deallocated by this function must previously have been
|
| 137 |
+
* allocated with \p allocate.
|
| 138 |
+
*/
|
| 139 |
+
__host__
|
| 140 |
+
inline void deallocate(pointer p, size_type cnt)
|
| 141 |
+
{
|
| 142 |
+
// use "::operator delete" rather than keyword delete
|
| 143 |
+
(void)cnt;
|
| 144 |
+
device_delete(p);
|
| 145 |
+
} // end deallocate()
|
| 146 |
+
|
| 147 |
+
/*! Returns the largest value \c n for which <tt>allocate(n)</tt> might succeed.
|
| 148 |
+
* \return The largest value \c n for which <tt>allocate(n)</tt> might succeed.
|
| 149 |
+
*/
|
| 150 |
+
__host__ __device__
|
| 151 |
+
inline size_type max_size() const
|
| 152 |
+
{
|
| 153 |
+
return ::cuda::std::numeric_limits<size_type>::max THRUST_PREVENT_MACRO_SUBSTITUTION () / sizeof(T);
|
| 154 |
+
} // end max_size()
|
| 155 |
+
|
| 156 |
+
/*! Compares against another \p device_malloc_allocator for equality.
|
| 157 |
+
* \return \c true
|
| 158 |
+
*/
|
| 159 |
+
__host__ __device__
|
| 160 |
+
inline bool operator==(device_new_allocator const&) { return true; }
|
| 161 |
+
|
| 162 |
+
/*! Compares against another \p device_malloc_allocator for inequality.
|
| 163 |
+
* \return \c false
|
| 164 |
+
*/
|
| 165 |
+
__host__ __device__
|
| 166 |
+
inline bool operator!=(device_new_allocator const &a) {return !operator==(a); }
|
| 167 |
+
}; // end device_new_allocator
|
| 168 |
+
|
| 169 |
+
/*! \} // allocators
|
| 170 |
+
*/
|
| 171 |
+
|
| 172 |
+
THRUST_NAMESPACE_END
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_reference.h
ADDED
|
@@ -0,0 +1,987 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
/*! \file
|
| 18 |
+
* \brief A reference to an object which resides in memory associated with the
|
| 19 |
+
* device system.
|
| 20 |
+
*/
|
| 21 |
+
|
| 22 |
+
#pragma once
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/config.h>
|
| 25 |
+
#include <thrust/device_ptr.h>
|
| 26 |
+
#include <thrust/detail/type_traits.h>
|
| 27 |
+
#include <thrust/detail/reference.h>
|
| 28 |
+
|
| 29 |
+
THRUST_NAMESPACE_BEGIN
|
| 30 |
+
|
| 31 |
+
/*! \addtogroup memory_management Memory Management
|
| 32 |
+
* \{
|
| 33 |
+
*/
|
| 34 |
+
|
| 35 |
+
/*! \p device_reference acts as a reference-like object to an object stored in device memory.
|
| 36 |
+
* \p device_reference is not intended to be used directly; rather, this type
|
| 37 |
+
* is the result of deferencing a \p device_ptr. Similarly, taking the address of
|
| 38 |
+
* a \p device_reference yields a \p device_ptr.
|
| 39 |
+
*
|
| 40 |
+
* \p device_reference may often be used from host code in place of operations defined on
|
| 41 |
+
* its associated \c value_type. For example, when \p device_reference refers to an
|
| 42 |
+
* arithmetic type, arithmetic operations on it are legal:
|
| 43 |
+
*
|
| 44 |
+
* \code
|
| 45 |
+
* #include <thrust/device_vector.h>
|
| 46 |
+
*
|
| 47 |
+
* int main(void)
|
| 48 |
+
* {
|
| 49 |
+
* thrust::device_vector<int> vec(1, 13);
|
| 50 |
+
*
|
| 51 |
+
* thrust::device_reference<int> ref_to_thirteen = vec[0];
|
| 52 |
+
*
|
| 53 |
+
* int x = ref_to_thirteen + 1;
|
| 54 |
+
*
|
| 55 |
+
* // x is 14
|
| 56 |
+
*
|
| 57 |
+
* return 0;
|
| 58 |
+
* }
|
| 59 |
+
* \endcode
|
| 60 |
+
*
|
| 61 |
+
* Similarly, we can print the value of \c ref_to_thirteen in the above code by using an
|
| 62 |
+
* \c iostream:
|
| 63 |
+
*
|
| 64 |
+
* \code
|
| 65 |
+
* #include <thrust/device_vector.h>
|
| 66 |
+
* #include <iostream>
|
| 67 |
+
*
|
| 68 |
+
* int main(void)
|
| 69 |
+
* {
|
| 70 |
+
* thrust::device_vector<int> vec(1, 13);
|
| 71 |
+
*
|
| 72 |
+
* thrust::device_reference<int> ref_to_thirteen = vec[0];
|
| 73 |
+
*
|
| 74 |
+
* std::cout << ref_to_thirteen << std::endl;
|
| 75 |
+
*
|
| 76 |
+
* // 13 is printed
|
| 77 |
+
*
|
| 78 |
+
* return 0;
|
| 79 |
+
* }
|
| 80 |
+
* \endcode
|
| 81 |
+
*
|
| 82 |
+
* Of course, we needn't explicitly create a \p device_reference in the previous
|
| 83 |
+
* example, because one is returned by \p device_vector's bracket operator. A more natural
|
| 84 |
+
* way to print the value of a \p device_vector element might be:
|
| 85 |
+
*
|
| 86 |
+
* \code
|
| 87 |
+
* #include <thrust/device_vector.h>
|
| 88 |
+
* #include <iostream>
|
| 89 |
+
*
|
| 90 |
+
* int main(void)
|
| 91 |
+
* {
|
| 92 |
+
* thrust::device_vector<int> vec(1, 13);
|
| 93 |
+
*
|
| 94 |
+
* std::cout << vec[0] << std::endl;
|
| 95 |
+
*
|
| 96 |
+
* // 13 is printed
|
| 97 |
+
*
|
| 98 |
+
* return 0;
|
| 99 |
+
* }
|
| 100 |
+
* \endcode
|
| 101 |
+
*
|
| 102 |
+
* These kinds of operations should be used sparingly in performance-critical code, because
|
| 103 |
+
* they imply a potentially expensive copy between host and device space.
|
| 104 |
+
*
|
| 105 |
+
* Some operations which are possible with regular objects are impossible with their
|
| 106 |
+
* corresponding \p device_reference objects due to the requirements of the C++ language. For
|
| 107 |
+
* example, because the member access operator cannot be overloaded, member variables and functions
|
| 108 |
+
* of a referent object cannot be directly accessed through its \p device_reference.
|
| 109 |
+
*
|
| 110 |
+
* The following code, which generates a compiler error, illustrates:
|
| 111 |
+
*
|
| 112 |
+
* \code
|
| 113 |
+
* #include <thrust/device_vector.h>
|
| 114 |
+
*
|
| 115 |
+
* struct foo
|
| 116 |
+
* {
|
| 117 |
+
* int x;
|
| 118 |
+
* };
|
| 119 |
+
*
|
| 120 |
+
* int main(void)
|
| 121 |
+
* {
|
| 122 |
+
* thrust::device_vector<foo> foo_vec(1);
|
| 123 |
+
*
|
| 124 |
+
* thrust::device_reference<foo> foo_ref = foo_vec[0];
|
| 125 |
+
*
|
| 126 |
+
* foo_ref.x = 13; // ERROR: x cannot be accessed through foo_ref
|
| 127 |
+
*
|
| 128 |
+
* return 0;
|
| 129 |
+
* }
|
| 130 |
+
* \endcode
|
| 131 |
+
*
|
| 132 |
+
* Instead, a host space copy must be created to access \c foo's \c x member:
|
| 133 |
+
*
|
| 134 |
+
* \code
|
| 135 |
+
* #include <thrust/device_vector.h>
|
| 136 |
+
*
|
| 137 |
+
* struct foo
|
| 138 |
+
* {
|
| 139 |
+
* int x;
|
| 140 |
+
* };
|
| 141 |
+
*
|
| 142 |
+
* int main(void)
|
| 143 |
+
* {
|
| 144 |
+
* thrust::device_vector<foo> foo_vec(1);
|
| 145 |
+
*
|
| 146 |
+
* // create a local host-side foo object
|
| 147 |
+
* foo host_foo;
|
| 148 |
+
* host_foo.x = 13;
|
| 149 |
+
*
|
| 150 |
+
* thrust::device_reference<foo> foo_ref = foo_vec[0];
|
| 151 |
+
*
|
| 152 |
+
* foo_ref = host_foo;
|
| 153 |
+
*
|
| 154 |
+
* // foo_ref's x member is 13
|
| 155 |
+
*
|
| 156 |
+
* return 0;
|
| 157 |
+
* }
|
| 158 |
+
* \endcode
|
| 159 |
+
*
|
| 160 |
+
* Another common case where a \p device_reference cannot directly be used in place of
|
| 161 |
+
* its referent object occurs when passing them as parameters to functions like \c printf
|
| 162 |
+
* which have varargs parameters. Because varargs parameters must be Plain Old Data, a
|
| 163 |
+
* \p device_reference to a POD type requires a cast when passed to \c printf:
|
| 164 |
+
*
|
| 165 |
+
* \code
|
| 166 |
+
* #include <stdio.h>
|
| 167 |
+
* #include <thrust/device_vector.h>
|
| 168 |
+
*
|
| 169 |
+
* int main(void)
|
| 170 |
+
* {
|
| 171 |
+
* thrust::device_vector<int> vec(1,13);
|
| 172 |
+
*
|
| 173 |
+
* // vec[0] must be cast to int when passing to printf
|
| 174 |
+
* printf("%d\n", (int) vec[0]);
|
| 175 |
+
*
|
| 176 |
+
* return 0;
|
| 177 |
+
* }
|
| 178 |
+
* \endcode
|
| 179 |
+
*
|
| 180 |
+
* \see device_ptr
|
| 181 |
+
* \see device_vector
|
| 182 |
+
*/
|
| 183 |
+
template<typename T>
|
| 184 |
+
class device_reference
|
| 185 |
+
: public thrust::reference<
|
| 186 |
+
T,
|
| 187 |
+
thrust::device_ptr<T>,
|
| 188 |
+
thrust::device_reference<T>
|
| 189 |
+
>
|
| 190 |
+
{
|
| 191 |
+
private:
|
| 192 |
+
typedef thrust::reference<
|
| 193 |
+
T,
|
| 194 |
+
thrust::device_ptr<T>,
|
| 195 |
+
thrust::device_reference<T>
|
| 196 |
+
> super_t;
|
| 197 |
+
|
| 198 |
+
public:
|
| 199 |
+
/*! The type of the value referenced by this type of \p device_reference.
|
| 200 |
+
*/
|
| 201 |
+
typedef typename super_t::value_type value_type;
|
| 202 |
+
|
| 203 |
+
/*! The type of the expression <tt>&ref</tt>, where <tt>ref</tt> is a \p device_reference.
|
| 204 |
+
*/
|
| 205 |
+
typedef typename super_t::pointer pointer;
|
| 206 |
+
|
| 207 |
+
/*! This copy constructor accepts a const reference to another
|
| 208 |
+
* \p device_reference. After this \p device_reference is constructed,
|
| 209 |
+
* it shall refer to the same object as \p other.
|
| 210 |
+
*
|
| 211 |
+
* \param other A \p device_reference to copy from.
|
| 212 |
+
*
|
| 213 |
+
* The following code snippet demonstrates the semantics of this
|
| 214 |
+
* copy constructor.
|
| 215 |
+
*
|
| 216 |
+
* \code
|
| 217 |
+
* #include <thrust/device_vector.h>
|
| 218 |
+
* #include <assert.h>
|
| 219 |
+
* ...
|
| 220 |
+
* thrust::device_vector<int> v(1,0);
|
| 221 |
+
* thrust::device_reference<int> ref = v[0];
|
| 222 |
+
*
|
| 223 |
+
* // ref equals the object at v[0]
|
| 224 |
+
* assert(ref == v[0]);
|
| 225 |
+
*
|
| 226 |
+
* // the address of ref equals the address of v[0]
|
| 227 |
+
* assert(&ref == &v[0]);
|
| 228 |
+
*
|
| 229 |
+
* // modifying v[0] modifies ref
|
| 230 |
+
* v[0] = 13;
|
| 231 |
+
* assert(ref == 13);
|
| 232 |
+
* \endcode
|
| 233 |
+
*
|
| 234 |
+
* \note This constructor is templated primarily to allow initialization of
|
| 235 |
+
* <tt>device_reference<const T></tt> from <tt>device_reference<T></tt>.
|
| 236 |
+
*/
|
| 237 |
+
template<typename OtherT>
|
| 238 |
+
__host__ __device__
|
| 239 |
+
device_reference(const device_reference<OtherT> &other,
|
| 240 |
+
typename thrust::detail::enable_if_convertible<
|
| 241 |
+
typename device_reference<OtherT>::pointer,
|
| 242 |
+
pointer
|
| 243 |
+
>::type * = 0)
|
| 244 |
+
: super_t(other)
|
| 245 |
+
{}
|
| 246 |
+
|
| 247 |
+
/*! This copy constructor initializes this \p device_reference
|
| 248 |
+
* to refer to an object pointed to by the given \p device_ptr. After
|
| 249 |
+
* this \p device_reference is constructed, it shall refer to the
|
| 250 |
+
* object pointed to by \p ptr.
|
| 251 |
+
*
|
| 252 |
+
* \param ptr A \p device_ptr to copy from.
|
| 253 |
+
*
|
| 254 |
+
* The following code snippet demonstrates the semantic of this
|
| 255 |
+
* copy constructor.
|
| 256 |
+
*
|
| 257 |
+
* \code
|
| 258 |
+
* #include <thrust/device_vector.h>
|
| 259 |
+
* #include <assert.h>
|
| 260 |
+
* ...
|
| 261 |
+
* thrust::device_vector<int> v(1,0);
|
| 262 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 263 |
+
* thrust::device_reference<int> ref(ptr);
|
| 264 |
+
*
|
| 265 |
+
* // ref equals the object pointed to by ptr
|
| 266 |
+
* assert(ref == *ptr);
|
| 267 |
+
*
|
| 268 |
+
* // the address of ref equals ptr
|
| 269 |
+
* assert(&ref == ptr);
|
| 270 |
+
*
|
| 271 |
+
* // modifying *ptr modifies ref
|
| 272 |
+
* *ptr = 13;
|
| 273 |
+
* assert(ref == 13);
|
| 274 |
+
* \endcode
|
| 275 |
+
*/
|
| 276 |
+
__host__ __device__
|
| 277 |
+
explicit device_reference(const pointer &ptr)
|
| 278 |
+
: super_t(ptr)
|
| 279 |
+
{}
|
| 280 |
+
|
| 281 |
+
/*! This assignment operator assigns the value of the object referenced by
|
| 282 |
+
* the given \p device_reference to the object referenced by this
|
| 283 |
+
* \p device_reference.
|
| 284 |
+
*
|
| 285 |
+
* \param other The \p device_reference to assign from.
|
| 286 |
+
* \return <tt>*this</tt>
|
| 287 |
+
*/
|
| 288 |
+
template<typename OtherT>
|
| 289 |
+
__host__ __device__
|
| 290 |
+
device_reference &operator=(const device_reference<OtherT> &other)
|
| 291 |
+
{
|
| 292 |
+
return super_t::operator=(other);
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
/*! Assignment operator assigns the value of the given value to the
|
| 296 |
+
* value referenced by this \p device_reference.
|
| 297 |
+
*
|
| 298 |
+
* \param x The value to assign from.
|
| 299 |
+
* \return <tt>*this</tt>
|
| 300 |
+
*/
|
| 301 |
+
__host__ __device__
|
| 302 |
+
device_reference &operator=(const value_type &x)
|
| 303 |
+
{
|
| 304 |
+
return super_t::operator=(x);
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
// declare these members for the purpose of Doxygenating them
|
| 308 |
+
// they actually exist in a derived-from class
|
| 309 |
+
#if 0
|
| 310 |
+
/*! Address-of operator returns a \p device_ptr pointing to the object
|
| 311 |
+
* referenced by this \p device_reference. It does not return the
|
| 312 |
+
* address of this \p device_reference.
|
| 313 |
+
*
|
| 314 |
+
* \return A \p device_ptr pointing to the object this
|
| 315 |
+
* \p device_reference references.
|
| 316 |
+
*/
|
| 317 |
+
__host__ __device__
|
| 318 |
+
pointer operator&(void) const;
|
| 319 |
+
|
| 320 |
+
/*! Conversion operator converts this \p device_reference to T
|
| 321 |
+
* by returning a copy of the object referenced by this
|
| 322 |
+
* \p device_reference.
|
| 323 |
+
*
|
| 324 |
+
* \return A copy of the object referenced by this \p device_reference.
|
| 325 |
+
*/
|
| 326 |
+
__host__ __device__
|
| 327 |
+
operator value_type (void) const;
|
| 328 |
+
|
| 329 |
+
/*! swaps the value this \p device_reference references with another.
|
| 330 |
+
* \p other The other \p device_reference with which to swap.
|
| 331 |
+
*/
|
| 332 |
+
__host__ __device__
|
| 333 |
+
void swap(device_reference &other);
|
| 334 |
+
|
| 335 |
+
/*! Prefix increment operator increments the object referenced by this
|
| 336 |
+
* \p device_reference.
|
| 337 |
+
*
|
| 338 |
+
* \return <tt>*this</tt>
|
| 339 |
+
*
|
| 340 |
+
* The following code snippet demonstrates the semantics of
|
| 341 |
+
* \p device_reference's prefix increment operator.
|
| 342 |
+
*
|
| 343 |
+
* \code
|
| 344 |
+
* #include <thrust/device_vector.h>
|
| 345 |
+
* #include <assert.h>
|
| 346 |
+
* ...
|
| 347 |
+
* thrust::device_vector<int> v(1,0);
|
| 348 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 349 |
+
* thrust::device_reference<int> ref(ptr);
|
| 350 |
+
*
|
| 351 |
+
* // ref equals 0
|
| 352 |
+
* assert(ref == 0);
|
| 353 |
+
*
|
| 354 |
+
* // the object pointed to by ptr equals 1
|
| 355 |
+
* assert(*ptr == 1);
|
| 356 |
+
*
|
| 357 |
+
* // v[0] equals 1
|
| 358 |
+
* assert(v[0] == 1);
|
| 359 |
+
*
|
| 360 |
+
* // increment ref
|
| 361 |
+
* ++ref;
|
| 362 |
+
*
|
| 363 |
+
* // ref equals 1
|
| 364 |
+
* assert(ref == 1);
|
| 365 |
+
*
|
| 366 |
+
* // the object pointed to by ptr equals 1
|
| 367 |
+
* assert(*ptr == 1);
|
| 368 |
+
*
|
| 369 |
+
* // v[0] equals 1
|
| 370 |
+
* assert(v[0] == 1);
|
| 371 |
+
* \endcode
|
| 372 |
+
*
|
| 373 |
+
* \note The increment executes as if it were executed on the host.
|
| 374 |
+
* This may change in a later version.
|
| 375 |
+
*/
|
| 376 |
+
device_reference &operator++(void);
|
| 377 |
+
|
| 378 |
+
/*! Postfix increment operator copies the object referenced by this
|
| 379 |
+
* \p device_reference, increments the object referenced by this
|
| 380 |
+
* \p device_reference, and returns the copy.
|
| 381 |
+
*
|
| 382 |
+
* \return A copy of the object referenced by this \p device_reference
|
| 383 |
+
* before being incremented.
|
| 384 |
+
*
|
| 385 |
+
* The following code snippet demonstrates the semantics of
|
| 386 |
+
* \p device_reference's postfix increment operator.
|
| 387 |
+
*
|
| 388 |
+
* \code
|
| 389 |
+
* #include <thrust/device_vector.h>
|
| 390 |
+
* #include <assert.h>
|
| 391 |
+
* ...
|
| 392 |
+
* thrust::device_vector<int> v(1,0);
|
| 393 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 394 |
+
* thrust::device_reference<int> ref(ptr);
|
| 395 |
+
*
|
| 396 |
+
* // ref equals 0
|
| 397 |
+
* assert(ref == 0);
|
| 398 |
+
*
|
| 399 |
+
* // the object pointed to by ptr equals 0
|
| 400 |
+
* assert(*ptr == 0);
|
| 401 |
+
*
|
| 402 |
+
* // v[0] equals 0
|
| 403 |
+
* assert(v[0] == 0);
|
| 404 |
+
*
|
| 405 |
+
* // increment ref
|
| 406 |
+
* int x = ref++;
|
| 407 |
+
*
|
| 408 |
+
* // x equals 0
|
| 409 |
+
* assert(x == 0)
|
| 410 |
+
*
|
| 411 |
+
* // ref equals 1
|
| 412 |
+
* assert(ref == 1);
|
| 413 |
+
*
|
| 414 |
+
* // the object pointed to by ptr equals 1
|
| 415 |
+
* assert(*ptr == 1);
|
| 416 |
+
*
|
| 417 |
+
* // v[0] equals 1
|
| 418 |
+
* assert(v[0] == 1);
|
| 419 |
+
* \endcode
|
| 420 |
+
*
|
| 421 |
+
* \note The increment executes as if it were executed on the host.
|
| 422 |
+
* This may change in a later version.
|
| 423 |
+
*/
|
| 424 |
+
value_type operator++(int);
|
| 425 |
+
|
| 426 |
+
/*! Addition assignment operator add-assigns the object referenced by this
|
| 427 |
+
* \p device_reference and returns this \p device_reference.
|
| 428 |
+
*
|
| 429 |
+
* \param rhs The right hand side of the add-assignment.
|
| 430 |
+
* \return <tt>*this</tt>.
|
| 431 |
+
*
|
| 432 |
+
* The following code snippet demonstrates the semantics of
|
| 433 |
+
* \p device_reference's addition assignment operator.
|
| 434 |
+
*
|
| 435 |
+
* \code
|
| 436 |
+
* #include <thrust/device_vector.h>
|
| 437 |
+
* #include <assert.h>
|
| 438 |
+
* ...
|
| 439 |
+
* thrust::device_vector<int> v(1,0);
|
| 440 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 441 |
+
* thrust::device_reference<int> ref(ptr);
|
| 442 |
+
*
|
| 443 |
+
* // ref equals 0
|
| 444 |
+
* assert(ref == 0);
|
| 445 |
+
*
|
| 446 |
+
* // the object pointed to by ptr equals 0
|
| 447 |
+
* assert(*ptr == 0);
|
| 448 |
+
*
|
| 449 |
+
* // v[0] equals 0
|
| 450 |
+
* assert(v[0] == 0);
|
| 451 |
+
*
|
| 452 |
+
* // add-assign ref
|
| 453 |
+
* ref += 5;
|
| 454 |
+
*
|
| 455 |
+
* // ref equals 5
|
| 456 |
+
* assert(ref == 5);
|
| 457 |
+
*
|
| 458 |
+
* // the object pointed to by ptr equals 5
|
| 459 |
+
* assert(*ptr == 5);
|
| 460 |
+
*
|
| 461 |
+
* // v[0] equals 5
|
| 462 |
+
* assert(v[0] == 5);
|
| 463 |
+
* \endcode
|
| 464 |
+
*
|
| 465 |
+
* \note The add-assignment executes as as if it were executed on the host.
|
| 466 |
+
* This may change in a later version.
|
| 467 |
+
*/
|
| 468 |
+
device_reference &operator+=(const T &rhs);
|
| 469 |
+
|
| 470 |
+
/*! Prefix decrement operator decrements the object referenced by this
|
| 471 |
+
* \p device_reference.
|
| 472 |
+
*
|
| 473 |
+
* \return <tt>*this</tt>
|
| 474 |
+
*
|
| 475 |
+
* The following code snippet demonstrates the semantics of
|
| 476 |
+
* \p device_reference's prefix decrement operator.
|
| 477 |
+
*
|
| 478 |
+
* \code
|
| 479 |
+
* #include <thrust/device_vector.h>
|
| 480 |
+
* #include <assert.h>
|
| 481 |
+
* ...
|
| 482 |
+
* thrust::device_vector<int> v(1,0);
|
| 483 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 484 |
+
* thrust::device_reference<int> ref(ptr);
|
| 485 |
+
*
|
| 486 |
+
* // ref equals 0
|
| 487 |
+
* assert(ref == 0);
|
| 488 |
+
*
|
| 489 |
+
* // the object pointed to by ptr equals 0
|
| 490 |
+
* assert(*ptr == 0);
|
| 491 |
+
*
|
| 492 |
+
* // v[0] equals 0
|
| 493 |
+
* assert(v[0] == 0);
|
| 494 |
+
*
|
| 495 |
+
* // decrement ref
|
| 496 |
+
* --ref;
|
| 497 |
+
*
|
| 498 |
+
* // ref equals -1
|
| 499 |
+
* assert(ref == -1);
|
| 500 |
+
*
|
| 501 |
+
* // the object pointed to by ptr equals -1
|
| 502 |
+
* assert(*ptr == -1);
|
| 503 |
+
*
|
| 504 |
+
* // v[0] equals -1
|
| 505 |
+
* assert(v[0] == -1);
|
| 506 |
+
* \endcode
|
| 507 |
+
*
|
| 508 |
+
* \note The decrement executes as if it were executed on the host.
|
| 509 |
+
* This may change in a later version.
|
| 510 |
+
*/
|
| 511 |
+
device_reference &operator--(void);
|
| 512 |
+
|
| 513 |
+
/*! Postfix decrement operator copies the object referenced by this
|
| 514 |
+
* \p device_reference, decrements the object referenced by this
|
| 515 |
+
* \p device_reference, and returns the copy.
|
| 516 |
+
*
|
| 517 |
+
* \return A copy of the object referenced by this \p device_reference
|
| 518 |
+
* before being decremented.
|
| 519 |
+
*
|
| 520 |
+
* The following code snippet demonstrates the semantics of
|
| 521 |
+
* \p device_reference's postfix decrement operator.
|
| 522 |
+
*
|
| 523 |
+
* \code
|
| 524 |
+
* #include <thrust/device_vector.h>
|
| 525 |
+
* #include <assert.h>
|
| 526 |
+
* ...
|
| 527 |
+
* thrust::device_vector<int> v(1,0);
|
| 528 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 529 |
+
* thrust::device_reference<int> ref(ptr);
|
| 530 |
+
*
|
| 531 |
+
* // ref equals 0
|
| 532 |
+
* assert(ref == 0);
|
| 533 |
+
*
|
| 534 |
+
* // the object pointed to by ptr equals 0
|
| 535 |
+
* assert(*ptr == 0);
|
| 536 |
+
*
|
| 537 |
+
* // v[0] equals 0
|
| 538 |
+
* assert(v[0] == 0);
|
| 539 |
+
*
|
| 540 |
+
* // decrement ref
|
| 541 |
+
* int x = ref--;
|
| 542 |
+
*
|
| 543 |
+
* // x equals 0
|
| 544 |
+
* assert(x == 0)
|
| 545 |
+
*
|
| 546 |
+
* // ref equals -1
|
| 547 |
+
* assert(ref == -1);
|
| 548 |
+
*
|
| 549 |
+
* // the object pointed to by ptr equals -1
|
| 550 |
+
* assert(*ptr == -1);
|
| 551 |
+
*
|
| 552 |
+
* // v[0] equals -1
|
| 553 |
+
* assert(v[0] == -1);
|
| 554 |
+
* \endcode
|
| 555 |
+
*
|
| 556 |
+
* \note The decrement executes as if it were executed on the host.
|
| 557 |
+
* This may change in a later version.
|
| 558 |
+
*/
|
| 559 |
+
value_type operator--(int);
|
| 560 |
+
|
| 561 |
+
/*! Subtraction assignment operator subtract-assigns the object referenced by this
|
| 562 |
+
* \p device_reference and returns this \p device_reference.
|
| 563 |
+
*
|
| 564 |
+
* \param rhs The right hand side of the subtraction-assignment.
|
| 565 |
+
* \return <tt>*this</tt>.
|
| 566 |
+
*
|
| 567 |
+
* The following code snippet demonstrates the semantics of
|
| 568 |
+
* \p device_reference's addition assignment operator.
|
| 569 |
+
*
|
| 570 |
+
* \code
|
| 571 |
+
* #include <thrust/device_vector.h>
|
| 572 |
+
* #include <assert.h>
|
| 573 |
+
* ...
|
| 574 |
+
* thrust::device_vector<int> v(1,0);
|
| 575 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 576 |
+
* thrust::device_reference<int> ref(ptr);
|
| 577 |
+
*
|
| 578 |
+
* // ref equals 0
|
| 579 |
+
* assert(ref == 0);
|
| 580 |
+
*
|
| 581 |
+
* // the object pointed to by ptr equals 0
|
| 582 |
+
* assert(*ptr == 0);
|
| 583 |
+
*
|
| 584 |
+
* // v[0] equals 0
|
| 585 |
+
* assert(v[0] == 0);
|
| 586 |
+
*
|
| 587 |
+
* // subtract-assign ref
|
| 588 |
+
* ref -= 5;
|
| 589 |
+
*
|
| 590 |
+
* // ref equals -5
|
| 591 |
+
* assert(ref == -5);
|
| 592 |
+
*
|
| 593 |
+
* // the object pointed to by ptr equals -5
|
| 594 |
+
* assert(*ptr == -5);
|
| 595 |
+
*
|
| 596 |
+
* // v[0] equals -5
|
| 597 |
+
* assert(v[0] == -5);
|
| 598 |
+
* \endcode
|
| 599 |
+
*
|
| 600 |
+
* \note The subtract-assignment executes as as if it were executed on the host.
|
| 601 |
+
* This may change in a later version.
|
| 602 |
+
*/
|
| 603 |
+
device_reference &operator-=(const T &rhs);
|
| 604 |
+
|
| 605 |
+
/*! Multiplication assignment operator multiply-assigns the object referenced by this
|
| 606 |
+
* \p device_reference and returns this \p device_reference.
|
| 607 |
+
*
|
| 608 |
+
* \param rhs The right hand side of the multiply-assignment.
|
| 609 |
+
* \return <tt>*this</tt>.
|
| 610 |
+
*
|
| 611 |
+
* The following code snippet demonstrates the semantics of
|
| 612 |
+
* \p device_reference's multiply assignment operator.
|
| 613 |
+
*
|
| 614 |
+
* \code
|
| 615 |
+
* #include <thrust/device_vector.h>
|
| 616 |
+
* #include <assert.h>
|
| 617 |
+
* ...
|
| 618 |
+
* thrust::device_vector<int> v(1,1);
|
| 619 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 620 |
+
* thrust::device_reference<int> ref(ptr);
|
| 621 |
+
*
|
| 622 |
+
* // ref equals 1
|
| 623 |
+
* assert(ref == 1);
|
| 624 |
+
*
|
| 625 |
+
* // the object pointed to by ptr equals 1
|
| 626 |
+
* assert(*ptr == 1);
|
| 627 |
+
*
|
| 628 |
+
* // v[0] equals 1
|
| 629 |
+
* assert(v[0] == 1);
|
| 630 |
+
*
|
| 631 |
+
* // multiply-assign ref
|
| 632 |
+
* ref *= 5;
|
| 633 |
+
*
|
| 634 |
+
* // ref equals 5
|
| 635 |
+
* assert(ref == 5);
|
| 636 |
+
*
|
| 637 |
+
* // the object pointed to by ptr equals 5
|
| 638 |
+
* assert(*ptr == 5);
|
| 639 |
+
*
|
| 640 |
+
* // v[0] equals 5
|
| 641 |
+
* assert(v[0] == 5);
|
| 642 |
+
* \endcode
|
| 643 |
+
*
|
| 644 |
+
* \note The multiply-assignment executes as as if it were executed on the host.
|
| 645 |
+
* This may change in a later version.
|
| 646 |
+
*/
|
| 647 |
+
device_reference &operator*=(const T &rhs);
|
| 648 |
+
|
| 649 |
+
/*! Division assignment operator divide-assigns the object referenced by this
|
| 650 |
+
* \p device_reference and returns this \p device_reference.
|
| 651 |
+
*
|
| 652 |
+
* \param rhs The right hand side of the divide-assignment.
|
| 653 |
+
* \return <tt>*this</tt>.
|
| 654 |
+
*
|
| 655 |
+
* The following code snippet demonstrates the semantics of
|
| 656 |
+
* \p device_reference's divide assignment operator.
|
| 657 |
+
*
|
| 658 |
+
* \code
|
| 659 |
+
* #include <thrust/device_vector.h>
|
| 660 |
+
* #include <assert.h>
|
| 661 |
+
* ...
|
| 662 |
+
* thrust::device_vector<int> v(1,5);
|
| 663 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 664 |
+
* thrust::device_reference<int> ref(ptr);
|
| 665 |
+
*
|
| 666 |
+
* // ref equals 5
|
| 667 |
+
* assert(ref == 5);
|
| 668 |
+
*
|
| 669 |
+
* // the object pointed to by ptr equals 5
|
| 670 |
+
* assert(*ptr == 5);
|
| 671 |
+
*
|
| 672 |
+
* // v[0] equals 5
|
| 673 |
+
* assert(v[0] == 5);
|
| 674 |
+
*
|
| 675 |
+
* // divide-assign ref
|
| 676 |
+
* ref /= 5;
|
| 677 |
+
*
|
| 678 |
+
* // ref equals 1
|
| 679 |
+
* assert(ref == 1);
|
| 680 |
+
*
|
| 681 |
+
* // the object pointed to by ptr equals 1
|
| 682 |
+
* assert(*ptr == 1);
|
| 683 |
+
*
|
| 684 |
+
* // v[0] equals 1
|
| 685 |
+
* assert(v[0] == 1);
|
| 686 |
+
* \endcode
|
| 687 |
+
*
|
| 688 |
+
* \note The divide-assignment executes as as if it were executed on the host.
|
| 689 |
+
* This may change in a later version.
|
| 690 |
+
*/
|
| 691 |
+
device_reference &operator/=(const T &rhs);
|
| 692 |
+
|
| 693 |
+
/*! Modulation assignment operator modulus-assigns the object referenced by this
|
| 694 |
+
* \p device_reference and returns this \p device_reference.
|
| 695 |
+
*
|
| 696 |
+
* \param rhs The right hand side of the divide-assignment.
|
| 697 |
+
* \return <tt>*this</tt>.
|
| 698 |
+
*
|
| 699 |
+
* The following code snippet demonstrates the semantics of
|
| 700 |
+
* \p device_reference's divide assignment operator.
|
| 701 |
+
*
|
| 702 |
+
* \code
|
| 703 |
+
* #include <thrust/device_vector.h>
|
| 704 |
+
* #include <assert.h>
|
| 705 |
+
* ...
|
| 706 |
+
* thrust::device_vector<int> v(1,5);
|
| 707 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 708 |
+
* thrust::device_reference<int> ref(ptr);
|
| 709 |
+
*
|
| 710 |
+
* // ref equals 5
|
| 711 |
+
* assert(ref == 5);
|
| 712 |
+
*
|
| 713 |
+
* // the object pointed to by ptr equals 5
|
| 714 |
+
* assert(*ptr == 5);
|
| 715 |
+
*
|
| 716 |
+
* // v[0] equals 5
|
| 717 |
+
* assert(v[0] == 5);
|
| 718 |
+
*
|
| 719 |
+
* // modulus-assign ref
|
| 720 |
+
* ref %= 5;
|
| 721 |
+
*
|
| 722 |
+
* // ref equals 0
|
| 723 |
+
* assert(ref == 0);
|
| 724 |
+
*
|
| 725 |
+
* // the object pointed to by ptr equals 0
|
| 726 |
+
* assert(*ptr == 0);
|
| 727 |
+
*
|
| 728 |
+
* // v[0] equals 0
|
| 729 |
+
* assert(v[0] == 0);
|
| 730 |
+
* \endcode
|
| 731 |
+
*
|
| 732 |
+
* \note The modulus-assignment executes as as if it were executed on the host.
|
| 733 |
+
* This may change in a later version.
|
| 734 |
+
*/
|
| 735 |
+
device_reference &operator%=(const T &rhs);
|
| 736 |
+
|
| 737 |
+
/*! Bitwise left shift assignment operator left shift-assigns the object referenced by this
|
| 738 |
+
* \p device_reference and returns this \p device_reference.
|
| 739 |
+
*
|
| 740 |
+
* \param rhs The right hand side of the left shift-assignment.
|
| 741 |
+
* \return <tt>*this</tt>.
|
| 742 |
+
*
|
| 743 |
+
* The following code snippet demonstrates the semantics of
|
| 744 |
+
* \p device_reference's left shift assignment operator.
|
| 745 |
+
*
|
| 746 |
+
* \code
|
| 747 |
+
* #include <thrust/device_vector.h>
|
| 748 |
+
* #include <assert.h>
|
| 749 |
+
* ...
|
| 750 |
+
* thrust::device_vector<int> v(1,1);
|
| 751 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 752 |
+
* thrust::device_reference<int> ref(ptr);
|
| 753 |
+
*
|
| 754 |
+
* // ref equals 1
|
| 755 |
+
* assert(ref == 1);
|
| 756 |
+
*
|
| 757 |
+
* // the object pointed to by ptr equals 1
|
| 758 |
+
* assert(*ptr == 1);
|
| 759 |
+
*
|
| 760 |
+
* // v[0] equals 1
|
| 761 |
+
* assert(v[0] == 1);
|
| 762 |
+
*
|
| 763 |
+
* // left shift-assign ref
|
| 764 |
+
* ref <<= 1;
|
| 765 |
+
*
|
| 766 |
+
* // ref equals 2
|
| 767 |
+
* assert(ref == 2);
|
| 768 |
+
*
|
| 769 |
+
* // the object pointed to by ptr equals 2
|
| 770 |
+
* assert(*ptr == 2);
|
| 771 |
+
*
|
| 772 |
+
* // v[0] equals 2
|
| 773 |
+
* assert(v[0] == 2);
|
| 774 |
+
* \endcode
|
| 775 |
+
*
|
| 776 |
+
* \note The left shift-assignment executes as as if it were executed on the host.
|
| 777 |
+
* This may change in a later version.
|
| 778 |
+
*/
|
| 779 |
+
device_reference &operator<<=(const T &rhs);
|
| 780 |
+
|
| 781 |
+
/*! Bitwise right shift assignment operator right shift-assigns the object referenced by this
|
| 782 |
+
* \p device_reference and returns this \p device_reference.
|
| 783 |
+
*
|
| 784 |
+
* \param rhs The right hand side of the right shift-assignment.
|
| 785 |
+
* \return <tt>*this</tt>.
|
| 786 |
+
*
|
| 787 |
+
* The following code snippet demonstrates the semantics of
|
| 788 |
+
* \p device_reference's right shift assignment operator.
|
| 789 |
+
*
|
| 790 |
+
* \code
|
| 791 |
+
* #include <thrust/device_vector.h>
|
| 792 |
+
* #include <assert.h>
|
| 793 |
+
* ...
|
| 794 |
+
* thrust::device_vector<int> v(1,2);
|
| 795 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 796 |
+
* thrust::device_reference<int> ref(ptr);
|
| 797 |
+
*
|
| 798 |
+
* // ref equals 2
|
| 799 |
+
* assert(ref == 2);
|
| 800 |
+
*
|
| 801 |
+
* // the object pointed to by ptr equals 2
|
| 802 |
+
* assert(*ptr == 2);
|
| 803 |
+
*
|
| 804 |
+
* // v[0] equals 2
|
| 805 |
+
* assert(v[0] == 2);
|
| 806 |
+
*
|
| 807 |
+
* // right shift-assign ref
|
| 808 |
+
* ref >>= 1;
|
| 809 |
+
*
|
| 810 |
+
* // ref equals 1
|
| 811 |
+
* assert(ref == 1);
|
| 812 |
+
*
|
| 813 |
+
* // the object pointed to by ptr equals 1
|
| 814 |
+
* assert(*ptr == 1);
|
| 815 |
+
*
|
| 816 |
+
* // v[0] equals 1
|
| 817 |
+
* assert(v[0] == 1);
|
| 818 |
+
* \endcode
|
| 819 |
+
*
|
| 820 |
+
* \note The right shift-assignment executes as as if it were executed on the host.
|
| 821 |
+
* This may change in a later version.
|
| 822 |
+
*/
|
| 823 |
+
device_reference &operator>>=(const T &rhs);
|
| 824 |
+
|
| 825 |
+
/*! Bitwise AND assignment operator AND-assigns the object referenced by this
|
| 826 |
+
* \p device_reference and returns this \p device_reference.
|
| 827 |
+
*
|
| 828 |
+
* \param rhs The right hand side of the AND-assignment.
|
| 829 |
+
* \return <tt>*this</tt>.
|
| 830 |
+
*
|
| 831 |
+
* The following code snippet demonstrates the semantics of
|
| 832 |
+
* \p device_reference's AND assignment operator.
|
| 833 |
+
*
|
| 834 |
+
* \code
|
| 835 |
+
* #include <thrust/device_vector.h>
|
| 836 |
+
* #include <assert.h>
|
| 837 |
+
* ...
|
| 838 |
+
* thrust::device_vector<int> v(1,1);
|
| 839 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 840 |
+
* thrust::device_reference<int> ref(ptr);
|
| 841 |
+
*
|
| 842 |
+
* // ref equals 1
|
| 843 |
+
* assert(ref == 1);
|
| 844 |
+
*
|
| 845 |
+
* // the object pointed to by ptr equals 1
|
| 846 |
+
* assert(*ptr == 1);
|
| 847 |
+
*
|
| 848 |
+
* // v[0] equals 1
|
| 849 |
+
* assert(v[0] == 1);
|
| 850 |
+
*
|
| 851 |
+
* // right AND-assign ref
|
| 852 |
+
* ref &= 0;
|
| 853 |
+
*
|
| 854 |
+
* // ref equals 0
|
| 855 |
+
* assert(ref == 0);
|
| 856 |
+
*
|
| 857 |
+
* // the object pointed to by ptr equals 0
|
| 858 |
+
* assert(*ptr == 0);
|
| 859 |
+
*
|
| 860 |
+
* // v[0] equals 0
|
| 861 |
+
* assert(v[0] == 0);
|
| 862 |
+
* \endcode
|
| 863 |
+
*
|
| 864 |
+
* \note The AND-assignment executes as as if it were executed on the host.
|
| 865 |
+
* This may change in a later version.
|
| 866 |
+
*/
|
| 867 |
+
device_reference &operator&=(const T &rhs);
|
| 868 |
+
|
| 869 |
+
/*! Bitwise OR assignment operator OR-assigns the object referenced by this
|
| 870 |
+
* \p device_reference and returns this \p device_reference.
|
| 871 |
+
*
|
| 872 |
+
* \param rhs The right hand side of the OR-assignment.
|
| 873 |
+
* \return <tt>*this</tt>.
|
| 874 |
+
*
|
| 875 |
+
* The following code snippet demonstrates the semantics of
|
| 876 |
+
* \p device_reference's OR assignment operator.
|
| 877 |
+
*
|
| 878 |
+
* \code
|
| 879 |
+
* #include <thrust/device_vector.h>
|
| 880 |
+
* #include <assert.h>
|
| 881 |
+
* ...
|
| 882 |
+
* thrust::device_vector<int> v(1,0);
|
| 883 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 884 |
+
* thrust::device_reference<int> ref(ptr);
|
| 885 |
+
*
|
| 886 |
+
* // ref equals 0
|
| 887 |
+
* assert(ref == 0);
|
| 888 |
+
*
|
| 889 |
+
* // the object pointed to by ptr equals 0
|
| 890 |
+
* assert(*ptr == 0);
|
| 891 |
+
*
|
| 892 |
+
* // v[0] equals 0
|
| 893 |
+
* assert(v[0] == 0);
|
| 894 |
+
*
|
| 895 |
+
* // right OR-assign ref
|
| 896 |
+
* ref |= 1;
|
| 897 |
+
*
|
| 898 |
+
* // ref equals 1
|
| 899 |
+
* assert(ref == 1);
|
| 900 |
+
*
|
| 901 |
+
* // the object pointed to by ptr equals 1
|
| 902 |
+
* assert(*ptr == 1);
|
| 903 |
+
*
|
| 904 |
+
* // v[0] equals 1
|
| 905 |
+
* assert(v[0] == 1);
|
| 906 |
+
* \endcode
|
| 907 |
+
*
|
| 908 |
+
* \note The OR-assignment executes as as if it were executed on the host.
|
| 909 |
+
* This may change in a later version.
|
| 910 |
+
*/
|
| 911 |
+
device_reference &operator|=(const T &rhs);
|
| 912 |
+
|
| 913 |
+
/*! Bitwise XOR assignment operator XOR-assigns the object referenced by this
|
| 914 |
+
* \p device_reference and returns this \p device_reference.
|
| 915 |
+
*
|
| 916 |
+
* \param rhs The right hand side of the XOR-assignment.
|
| 917 |
+
* \return <tt>*this</tt>.
|
| 918 |
+
*
|
| 919 |
+
* The following code snippet demonstrates the semantics of
|
| 920 |
+
* \p device_reference's XOR assignment operator.
|
| 921 |
+
*
|
| 922 |
+
* \code
|
| 923 |
+
* #include <thrust/device_vector.h>
|
| 924 |
+
* #include <assert.h>
|
| 925 |
+
* ...
|
| 926 |
+
* thrust::device_vector<int> v(1,1);
|
| 927 |
+
* thrust::device_ptr<int> ptr = &v[0];
|
| 928 |
+
* thrust::device_reference<int> ref(ptr);
|
| 929 |
+
*
|
| 930 |
+
* // ref equals 1
|
| 931 |
+
* assert(ref == 1);
|
| 932 |
+
*
|
| 933 |
+
* // the object pointed to by ptr equals 1
|
| 934 |
+
* assert(*ptr == 1);
|
| 935 |
+
*
|
| 936 |
+
* // v[0] equals 1
|
| 937 |
+
* assert(v[0] == 1);
|
| 938 |
+
*
|
| 939 |
+
* // right XOR-assign ref
|
| 940 |
+
* ref ^= 1;
|
| 941 |
+
*
|
| 942 |
+
* // ref equals 0
|
| 943 |
+
* assert(ref == 0);
|
| 944 |
+
*
|
| 945 |
+
* // the object pointed to by ptr equals 0
|
| 946 |
+
* assert(*ptr == 0);
|
| 947 |
+
*
|
| 948 |
+
* // v[0] equals 0
|
| 949 |
+
* assert(v[0] == 0);
|
| 950 |
+
* \endcode
|
| 951 |
+
*
|
| 952 |
+
* \note The XOR-assignment executes as as if it were executed on the host.
|
| 953 |
+
* This may change in a later version.
|
| 954 |
+
*/
|
| 955 |
+
device_reference &operator^=(const T &rhs);
|
| 956 |
+
#endif // end doxygen-only members
|
| 957 |
+
}; // end device_reference
|
| 958 |
+
|
| 959 |
+
/*! swaps the value of one \p device_reference with another.
|
| 960 |
+
* \p x The first \p device_reference of interest.
|
| 961 |
+
* \p y The second \p device_reference of interest.
|
| 962 |
+
*/
|
| 963 |
+
template<typename T>
|
| 964 |
+
__host__ __device__
|
| 965 |
+
void swap(device_reference<T>& x, device_reference<T>& y)
|
| 966 |
+
{
|
| 967 |
+
x.swap(y);
|
| 968 |
+
}
|
| 969 |
+
|
| 970 |
+
// declare these methods for the purpose of Doxygenating them
|
| 971 |
+
// they actually are defined for a derived-from class
|
| 972 |
+
#if THRUST_DOXYGEN
|
| 973 |
+
/*! Writes to an output stream the value of a \p device_reference.
|
| 974 |
+
*
|
| 975 |
+
* \param os The output stream.
|
| 976 |
+
* \param y The \p device_reference to output.
|
| 977 |
+
* \return os.
|
| 978 |
+
*/
|
| 979 |
+
template<typename T, typename charT, typename traits>
|
| 980 |
+
std::basic_ostream<charT, traits> &
|
| 981 |
+
operator<<(std::basic_ostream<charT, traits> &os, const device_reference<T> &y);
|
| 982 |
+
#endif
|
| 983 |
+
|
| 984 |
+
/*! \} // memory_management
|
| 985 |
+
*/
|
| 986 |
+
|
| 987 |
+
THRUST_NAMESPACE_END
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/device_vector.h
ADDED
|
@@ -0,0 +1,511 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2018 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
/*! \file
|
| 19 |
+
* \brief A dynamically-sizable array of elements which resides in memory
|
| 20 |
+
* accessible to devices.
|
| 21 |
+
*/
|
| 22 |
+
|
| 23 |
+
#pragma once
|
| 24 |
+
|
| 25 |
+
#include <thrust/detail/config.h>
|
| 26 |
+
#include <thrust/detail/vector_base.h>
|
| 27 |
+
#include <thrust/device_allocator.h>
|
| 28 |
+
|
| 29 |
+
#include <initializer_list>
|
| 30 |
+
#include <vector>
|
| 31 |
+
#include <utility>
|
| 32 |
+
|
| 33 |
+
THRUST_NAMESPACE_BEGIN
|
| 34 |
+
|
| 35 |
+
/*! \addtogroup containers Containers
|
| 36 |
+
* \{
|
| 37 |
+
*/
|
| 38 |
+
|
| 39 |
+
/*! A \p device_vector is a container that supports random access to elements,
|
| 40 |
+
* constant time removal of elements at the end, and linear time insertion
|
| 41 |
+
* and removal of elements at the beginning or in the middle. The number of
|
| 42 |
+
* elements in a \p device_vector may vary dynamically; memory management is
|
| 43 |
+
* automatic. The memory associated with a \p device_vector resides in the
|
| 44 |
+
* memory accessible to devices.
|
| 45 |
+
*
|
| 46 |
+
* \see https://en.cppreference.com/w/cpp/container/vector
|
| 47 |
+
* \see device_allocator
|
| 48 |
+
* \see host_vector
|
| 49 |
+
* \see universal_vector
|
| 50 |
+
*/
|
| 51 |
+
template<typename T, typename Alloc = thrust::device_allocator<T> >
|
| 52 |
+
class device_vector
|
| 53 |
+
: public detail::vector_base<T,Alloc>
|
| 54 |
+
{
|
| 55 |
+
private:
|
| 56 |
+
typedef detail::vector_base<T,Alloc> Parent;
|
| 57 |
+
|
| 58 |
+
public:
|
| 59 |
+
/*! \cond
|
| 60 |
+
*/
|
| 61 |
+
typedef typename Parent::size_type size_type;
|
| 62 |
+
typedef typename Parent::value_type value_type;
|
| 63 |
+
/*! \endcond
|
| 64 |
+
*/
|
| 65 |
+
|
| 66 |
+
/*! This constructor creates an empty \p device_vector.
|
| 67 |
+
*/
|
| 68 |
+
device_vector(void)
|
| 69 |
+
:Parent() {}
|
| 70 |
+
|
| 71 |
+
/*! This constructor creates an empty \p device_vector.
|
| 72 |
+
* \param alloc The allocator to use by this device_vector.
|
| 73 |
+
*/
|
| 74 |
+
device_vector(const Alloc &alloc)
|
| 75 |
+
:Parent(alloc) {}
|
| 76 |
+
|
| 77 |
+
/*! The destructor erases the elements.
|
| 78 |
+
*/
|
| 79 |
+
// Define an empty destructor to explicitly specify
|
| 80 |
+
// its execution space qualifier, as a workaround for nvcc warning
|
| 81 |
+
~device_vector(void) {}
|
| 82 |
+
|
| 83 |
+
/*! This constructor creates a \p device_vector with the given
|
| 84 |
+
* size.
|
| 85 |
+
* \param n The number of elements to initially create.
|
| 86 |
+
*/
|
| 87 |
+
explicit device_vector(size_type n)
|
| 88 |
+
:Parent(n) {}
|
| 89 |
+
|
| 90 |
+
/*! This constructor creates a \p device_vector with the given
|
| 91 |
+
* size.
|
| 92 |
+
* \param n The number of elements to initially create.
|
| 93 |
+
* \param alloc The allocator to use by this device_vector.
|
| 94 |
+
*/
|
| 95 |
+
explicit device_vector(size_type n, const Alloc &alloc)
|
| 96 |
+
:Parent(n,alloc) {}
|
| 97 |
+
|
| 98 |
+
/*! This constructor creates a \p device_vector with copies
|
| 99 |
+
* of an exemplar element.
|
| 100 |
+
* \param n The number of elements to initially create.
|
| 101 |
+
* \param value An element to copy.
|
| 102 |
+
*/
|
| 103 |
+
explicit device_vector(size_type n, const value_type &value)
|
| 104 |
+
:Parent(n,value) {}
|
| 105 |
+
|
| 106 |
+
/*! This constructor creates a \p device_vector with copies
|
| 107 |
+
* of an exemplar element.
|
| 108 |
+
* \param n The number of elements to initially create.
|
| 109 |
+
* \param value An element to copy.
|
| 110 |
+
* \param alloc The allocator to use by this device_vector.
|
| 111 |
+
*/
|
| 112 |
+
explicit device_vector(size_type n, const value_type &value, const Alloc &alloc)
|
| 113 |
+
:Parent(n,value,alloc) {}
|
| 114 |
+
|
| 115 |
+
/*! Copy constructor copies from an exemplar \p device_vector.
|
| 116 |
+
* \param v The \p device_vector to copy.
|
| 117 |
+
*/
|
| 118 |
+
device_vector(const device_vector &v)
|
| 119 |
+
:Parent(v) {}
|
| 120 |
+
|
| 121 |
+
/*! Copy constructor copies from an exemplar \p device_vector.
|
| 122 |
+
* \param v The \p device_vector to copy.
|
| 123 |
+
* \param alloc The allocator to use by this device_vector.
|
| 124 |
+
*/
|
| 125 |
+
device_vector(const device_vector &v, const Alloc &alloc)
|
| 126 |
+
:Parent(v,alloc) {}
|
| 127 |
+
|
| 128 |
+
#if THRUST_CPP_DIALECT >= 2011
|
| 129 |
+
/*! Move constructor moves from another \p device_vector.
|
| 130 |
+
* \param v The device_vector to move.
|
| 131 |
+
*/
|
| 132 |
+
device_vector(device_vector &&v)
|
| 133 |
+
:Parent(std::move(v)) {}
|
| 134 |
+
|
| 135 |
+
/*! Move constructor moves from another \p device_vector.
|
| 136 |
+
* \param v The device_vector to move.
|
| 137 |
+
* \param alloc The allocator to use by this device_vector.
|
| 138 |
+
*/
|
| 139 |
+
device_vector(device_vector &&v, const Alloc &alloc)
|
| 140 |
+
:Parent(std::move(v), alloc) {}
|
| 141 |
+
#endif // THRUST_CPP_DIALECT >= 2011
|
| 142 |
+
|
| 143 |
+
/*! Copy assign operator copies another \p device_vector with the same type.
|
| 144 |
+
* \param v The \p device_vector to copy.
|
| 145 |
+
*/
|
| 146 |
+
device_vector &operator=(const device_vector &v)
|
| 147 |
+
{ Parent::operator=(v); return *this; }
|
| 148 |
+
|
| 149 |
+
#if THRUST_CPP_DIALECT >= 2011
|
| 150 |
+
/*! Move assign operator moves from another \p device_vector.
|
| 151 |
+
* \param v The device_vector to move.
|
| 152 |
+
*/
|
| 153 |
+
device_vector &operator=(device_vector &&v)
|
| 154 |
+
{ Parent::operator=(std::move(v)); return *this; }
|
| 155 |
+
#endif // THRUST_CPP_DIALECT >= 2011
|
| 156 |
+
|
| 157 |
+
/*! Copy constructor copies from an exemplar \p device_vector with different type.
|
| 158 |
+
* \param v The \p device_vector to copy.
|
| 159 |
+
*/
|
| 160 |
+
template<typename OtherT, typename OtherAlloc>
|
| 161 |
+
explicit device_vector(const device_vector<OtherT,OtherAlloc> &v)
|
| 162 |
+
:Parent(v) {}
|
| 163 |
+
|
| 164 |
+
/*! Assign operator copies from an exemplar \p device_vector with different type.
|
| 165 |
+
* \param v The \p device_vector to copy.
|
| 166 |
+
*/
|
| 167 |
+
template<typename OtherT, typename OtherAlloc>
|
| 168 |
+
device_vector &operator=(const device_vector<OtherT,OtherAlloc> &v)
|
| 169 |
+
{ Parent::operator=(v); return *this; }
|
| 170 |
+
|
| 171 |
+
/*! Copy constructor copies from an exemplar \c std::vector.
|
| 172 |
+
* \param v The <tt>std::vector</tt> to copy.
|
| 173 |
+
*/
|
| 174 |
+
template<typename OtherT, typename OtherAlloc>
|
| 175 |
+
device_vector(const std::vector<OtherT,OtherAlloc> &v)
|
| 176 |
+
:Parent(v) {}
|
| 177 |
+
|
| 178 |
+
/*! Assign operator copies from an exemplar <tt>std::vector</tt>.
|
| 179 |
+
* \param v The <tt>std::vector</tt> to copy.
|
| 180 |
+
*/
|
| 181 |
+
template<typename OtherT, typename OtherAlloc>
|
| 182 |
+
device_vector &operator=(const std::vector<OtherT,OtherAlloc> &v)
|
| 183 |
+
{ Parent::operator=(v); return *this;}
|
| 184 |
+
|
| 185 |
+
/*! Copy construct from a \p vector_base whose element type is convertible
|
| 186 |
+
* to \c T.
|
| 187 |
+
*
|
| 188 |
+
* \param v The \p vector_base to copy.
|
| 189 |
+
*/
|
| 190 |
+
template<typename OtherT, typename OtherAlloc>
|
| 191 |
+
device_vector(const detail::vector_base<OtherT,OtherAlloc> &v)
|
| 192 |
+
:Parent(v) {}
|
| 193 |
+
|
| 194 |
+
/*! Assign a \p vector_base whose element type is convertible to \c T.
|
| 195 |
+
* \param v The \p vector_base to copy.
|
| 196 |
+
*/
|
| 197 |
+
template<typename OtherT, typename OtherAlloc>
|
| 198 |
+
device_vector &operator=(const detail::vector_base<OtherT,OtherAlloc> &v)
|
| 199 |
+
{ Parent::operator=(v); return *this; }
|
| 200 |
+
|
| 201 |
+
/*! This constructor builds a \p device_vector from an intializer_list.
|
| 202 |
+
* \param il The intializer_list.
|
| 203 |
+
*/
|
| 204 |
+
device_vector(std::initializer_list<T> il)
|
| 205 |
+
:Parent(il) {}
|
| 206 |
+
|
| 207 |
+
/*! This constructor builds a \p device_vector from an intializer_list.
|
| 208 |
+
* \param il The intializer_list.
|
| 209 |
+
* \param alloc The allocator to use by this device_vector.
|
| 210 |
+
*/
|
| 211 |
+
device_vector(std::initializer_list<T> il, const Alloc &alloc)
|
| 212 |
+
:Parent(il, alloc) {}
|
| 213 |
+
|
| 214 |
+
/*! Assign an \p intializer_list with a matching element type
|
| 215 |
+
* \param il The intializer_list.
|
| 216 |
+
*/
|
| 217 |
+
device_vector &operator=(std::initializer_list<T> il)
|
| 218 |
+
{ Parent::operator=(il); return *this; }
|
| 219 |
+
|
| 220 |
+
/*! This constructor builds a \p device_vector from a range.
|
| 221 |
+
* \param first The beginning of the range.
|
| 222 |
+
* \param last The end of the range.
|
| 223 |
+
*/
|
| 224 |
+
template<typename InputIterator>
|
| 225 |
+
device_vector(InputIterator first, InputIterator last)
|
| 226 |
+
:Parent(first,last) {}
|
| 227 |
+
|
| 228 |
+
/*! This constructor builds a \p device_vector from a range.
|
| 229 |
+
* \param first The beginning of the range.
|
| 230 |
+
* \param last The end of the range.
|
| 231 |
+
* \param alloc The allocator to use by this device_vector.
|
| 232 |
+
*/
|
| 233 |
+
template<typename InputIterator>
|
| 234 |
+
device_vector(InputIterator first, InputIterator last, const Alloc &alloc)
|
| 235 |
+
:Parent(first,last,alloc) {}
|
| 236 |
+
|
| 237 |
+
// declare these members for the purpose of Doxygenating them
|
| 238 |
+
// they actually exist in a derived-from class
|
| 239 |
+
#if 0
|
| 240 |
+
/*! \brief Resizes this vector to the specified number of elements.
|
| 241 |
+
* \param new_size Number of elements this vector should contain.
|
| 242 |
+
* \param x Data with which new elements should be populated.
|
| 243 |
+
* \throw std::length_error If n exceeds max_size().
|
| 244 |
+
*
|
| 245 |
+
* This method will resize this vector to the specified number of
|
| 246 |
+
* elements. If the number is smaller than this vector's current
|
| 247 |
+
* size this vector is truncated, otherwise this vector is
|
| 248 |
+
* extended and new elements are populated with given data.
|
| 249 |
+
*/
|
| 250 |
+
void resize(size_type new_size, const value_type &x = value_type());
|
| 251 |
+
|
| 252 |
+
/*! Returns the number of elements in this vector.
|
| 253 |
+
*/
|
| 254 |
+
size_type size(void) const;
|
| 255 |
+
|
| 256 |
+
/*! Returns the size() of the largest possible vector.
|
| 257 |
+
* \return The largest possible return value of size().
|
| 258 |
+
*/
|
| 259 |
+
size_type max_size(void) const;
|
| 260 |
+
|
| 261 |
+
/*! \brief If n is less than or equal to capacity(), this call has no effect.
|
| 262 |
+
* Otherwise, this method is a request for allocation of additional memory. If
|
| 263 |
+
* the request is successful, then capacity() is greater than or equal to
|
| 264 |
+
* n; otherwise, capacity() is unchanged. In either case, size() is unchanged.
|
| 265 |
+
* \throw std::length_error If n exceeds max_size().
|
| 266 |
+
*/
|
| 267 |
+
void reserve(size_type n);
|
| 268 |
+
|
| 269 |
+
/*! Returns the number of elements which have been reserved in this
|
| 270 |
+
* vector.
|
| 271 |
+
*/
|
| 272 |
+
size_type capacity(void) const;
|
| 273 |
+
|
| 274 |
+
/*! This method shrinks the capacity of this vector to exactly
|
| 275 |
+
* fit its elements.
|
| 276 |
+
*/
|
| 277 |
+
void shrink_to_fit(void);
|
| 278 |
+
|
| 279 |
+
/*! \brief Subscript access to the data contained in this vector_dev.
|
| 280 |
+
* \param n The index of the element for which data should be accessed.
|
| 281 |
+
* \return Read/write reference to data.
|
| 282 |
+
*
|
| 283 |
+
* This operator allows for easy, array-style, data access.
|
| 284 |
+
* Note that data access with this operator is unchecked and
|
| 285 |
+
* out_of_range lookups are not defined.
|
| 286 |
+
*/
|
| 287 |
+
reference operator[](size_type n);
|
| 288 |
+
|
| 289 |
+
/*! \brief Subscript read access to the data contained in this vector_dev.
|
| 290 |
+
* \param n The index of the element for which data should be accessed.
|
| 291 |
+
* \return Read reference to data.
|
| 292 |
+
*
|
| 293 |
+
* This operator allows for easy, array-style, data access.
|
| 294 |
+
* Note that data access with this operator is unchecked and
|
| 295 |
+
* out_of_range lookups are not defined.
|
| 296 |
+
*/
|
| 297 |
+
const_reference operator[](size_type n) const;
|
| 298 |
+
|
| 299 |
+
/*! This method returns an iterator pointing to the beginning of
|
| 300 |
+
* this vector.
|
| 301 |
+
* \return mStart
|
| 302 |
+
*/
|
| 303 |
+
iterator begin(void);
|
| 304 |
+
|
| 305 |
+
/*! This method returns a const_iterator pointing to the beginning
|
| 306 |
+
* of this vector.
|
| 307 |
+
* \return mStart
|
| 308 |
+
*/
|
| 309 |
+
const_iterator begin(void) const;
|
| 310 |
+
|
| 311 |
+
/*! This method returns a const_iterator pointing to the beginning
|
| 312 |
+
* of this vector.
|
| 313 |
+
* \return mStart
|
| 314 |
+
*/
|
| 315 |
+
const_iterator cbegin(void) const;
|
| 316 |
+
|
| 317 |
+
/*! This method returns a reverse_iterator pointing to the beginning of
|
| 318 |
+
* this vector's reversed sequence.
|
| 319 |
+
* \return A reverse_iterator pointing to the beginning of this
|
| 320 |
+
* vector's reversed sequence.
|
| 321 |
+
*/
|
| 322 |
+
reverse_iterator rbegin(void);
|
| 323 |
+
|
| 324 |
+
/*! This method returns a const_reverse_iterator pointing to the beginning of
|
| 325 |
+
* this vector's reversed sequence.
|
| 326 |
+
* \return A const_reverse_iterator pointing to the beginning of this
|
| 327 |
+
* vector's reversed sequence.
|
| 328 |
+
*/
|
| 329 |
+
const_reverse_iterator rbegin(void) const;
|
| 330 |
+
|
| 331 |
+
/*! This method returns a const_reverse_iterator pointing to the beginning of
|
| 332 |
+
* this vector's reversed sequence.
|
| 333 |
+
* \return A const_reverse_iterator pointing to the beginning of this
|
| 334 |
+
* vector's reversed sequence.
|
| 335 |
+
*/
|
| 336 |
+
const_reverse_iterator crbegin(void) const;
|
| 337 |
+
|
| 338 |
+
/*! This method returns an iterator pointing to one element past the
|
| 339 |
+
* last of this vector.
|
| 340 |
+
* \return begin() + size().
|
| 341 |
+
*/
|
| 342 |
+
iterator end(void);
|
| 343 |
+
|
| 344 |
+
/*! This method returns a const_iterator pointing to one element past the
|
| 345 |
+
* last of this vector.
|
| 346 |
+
* \return begin() + size().
|
| 347 |
+
*/
|
| 348 |
+
const_iterator end(void) const;
|
| 349 |
+
|
| 350 |
+
/*! This method returns a const_iterator pointing to one element past the
|
| 351 |
+
* last of this vector.
|
| 352 |
+
* \return begin() + size().
|
| 353 |
+
*/
|
| 354 |
+
const_iterator cend(void) const;
|
| 355 |
+
|
| 356 |
+
/*! This method returns a reverse_iterator pointing to one element past the
|
| 357 |
+
* last of this vector's reversed sequence.
|
| 358 |
+
* \return rbegin() + size().
|
| 359 |
+
*/
|
| 360 |
+
reverse_iterator rend(void);
|
| 361 |
+
|
| 362 |
+
/*! This method returns a const_reverse_iterator pointing to one element past the
|
| 363 |
+
* last of this vector's reversed sequence.
|
| 364 |
+
* \return rbegin() + size().
|
| 365 |
+
*/
|
| 366 |
+
const_reverse_iterator rend(void) const;
|
| 367 |
+
|
| 368 |
+
/*! This method returns a const_reverse_iterator pointing to one element past the
|
| 369 |
+
* last of this vector's reversed sequence.
|
| 370 |
+
* \return rbegin() + size().
|
| 371 |
+
*/
|
| 372 |
+
const_reverse_iterator crend(void) const;
|
| 373 |
+
|
| 374 |
+
/*! This method returns a const_reference referring to the first element of this
|
| 375 |
+
* vector.
|
| 376 |
+
* \return The first element of this vector.
|
| 377 |
+
*/
|
| 378 |
+
const_reference front(void) const;
|
| 379 |
+
|
| 380 |
+
/*! This method returns a reference pointing to the first element of this
|
| 381 |
+
* vector.
|
| 382 |
+
* \return The first element of this vector.
|
| 383 |
+
*/
|
| 384 |
+
reference front(void);
|
| 385 |
+
|
| 386 |
+
/*! This method returns a const reference pointing to the last element of
|
| 387 |
+
* this vector.
|
| 388 |
+
* \return The last element of this vector.
|
| 389 |
+
*/
|
| 390 |
+
const_reference back(void) const;
|
| 391 |
+
|
| 392 |
+
/*! This method returns a reference referring to the last element of
|
| 393 |
+
* this vector_dev.
|
| 394 |
+
* \return The last element of this vector.
|
| 395 |
+
*/
|
| 396 |
+
reference back(void);
|
| 397 |
+
|
| 398 |
+
/*! This method returns a pointer to this vector's first element.
|
| 399 |
+
* \return A pointer to the first element of this vector.
|
| 400 |
+
*/
|
| 401 |
+
pointer data(void);
|
| 402 |
+
|
| 403 |
+
/*! This method returns a const_pointer to this vector's first element.
|
| 404 |
+
* \return a const_pointer to the first element of this vector.
|
| 405 |
+
*/
|
| 406 |
+
const_pointer data(void) const;
|
| 407 |
+
|
| 408 |
+
/*! This method resizes this vector to 0.
|
| 409 |
+
*/
|
| 410 |
+
void clear(void);
|
| 411 |
+
|
| 412 |
+
/*! This method returns true iff size() == 0.
|
| 413 |
+
* \return true if size() == 0; false, otherwise.
|
| 414 |
+
*/
|
| 415 |
+
bool empty(void) const;
|
| 416 |
+
|
| 417 |
+
/*! This method appends the given element to the end of this vector.
|
| 418 |
+
* \param x The element to append.
|
| 419 |
+
*/
|
| 420 |
+
void push_back(const value_type &x);
|
| 421 |
+
|
| 422 |
+
/*! This method erases the last element of this vector, invalidating
|
| 423 |
+
* all iterators and references to it.
|
| 424 |
+
*/
|
| 425 |
+
void pop_back(void);
|
| 426 |
+
|
| 427 |
+
/*! This method swaps the contents of this device_vector with another vector.
|
| 428 |
+
* \param v The vector with which to swap.
|
| 429 |
+
*/
|
| 430 |
+
void swap(device_vector &v);
|
| 431 |
+
|
| 432 |
+
/*! This method removes the element at position pos.
|
| 433 |
+
* \param pos The position of the element of interest.
|
| 434 |
+
* \return An iterator pointing to the new location of the element that followed the element
|
| 435 |
+
* at position pos.
|
| 436 |
+
*/
|
| 437 |
+
iterator erase(iterator pos);
|
| 438 |
+
|
| 439 |
+
/*! This method removes the range of elements [first,last) from this vector.
|
| 440 |
+
* \param first The beginning of the range of elements to remove.
|
| 441 |
+
* \param last The end of the range of elements to remove.
|
| 442 |
+
* \return An iterator pointing to the new location of the element that followed the last
|
| 443 |
+
* element in the sequence [first,last).
|
| 444 |
+
*/
|
| 445 |
+
iterator erase(iterator first, iterator last);
|
| 446 |
+
|
| 447 |
+
/*! This method inserts a single copy of a given exemplar value at the
|
| 448 |
+
* specified position in this vector.
|
| 449 |
+
* \param position The insertion position.
|
| 450 |
+
* \param x The exemplar element to copy & insert.
|
| 451 |
+
* \return An iterator pointing to the newly inserted element.
|
| 452 |
+
*/
|
| 453 |
+
iterator insert(iterator position, const T &x);
|
| 454 |
+
|
| 455 |
+
/*! This method inserts a copy of an exemplar value to a range at the
|
| 456 |
+
* specified position in this vector.
|
| 457 |
+
* \param position The insertion position
|
| 458 |
+
* \param n The number of insertions to perform.
|
| 459 |
+
* \param x The value to replicate and insert.
|
| 460 |
+
*/
|
| 461 |
+
void insert(iterator position, size_type n, const T &x);
|
| 462 |
+
|
| 463 |
+
/*! This method inserts a copy of an input range at the specified position
|
| 464 |
+
* in this vector.
|
| 465 |
+
* \param position The insertion position.
|
| 466 |
+
* \param first The beginning of the range to copy.
|
| 467 |
+
* \param last The end of the range to copy.
|
| 468 |
+
*
|
| 469 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator>Input Iterator</a>,
|
| 470 |
+
* and \p InputIterator's \c value_type is a model of <a href="https://en.cppreference.com/w/cpp/named_req/CopyAssignable">Assignable</a>.
|
| 471 |
+
*/
|
| 472 |
+
template<typename InputIterator>
|
| 473 |
+
void insert(iterator position, InputIterator first, InputIterator last);
|
| 474 |
+
|
| 475 |
+
/*! This version of \p assign replicates a given exemplar
|
| 476 |
+
* \p n times into this vector.
|
| 477 |
+
* \param n The number of times to copy \p x.
|
| 478 |
+
* \param x The exemplar element to replicate.
|
| 479 |
+
*/
|
| 480 |
+
void assign(size_type n, const T &x);
|
| 481 |
+
|
| 482 |
+
/*! This version of \p assign makes this vector a copy of a given input range.
|
| 483 |
+
* \param first The beginning of the range to copy.
|
| 484 |
+
* \param last The end of the range to copy.
|
| 485 |
+
*
|
| 486 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/named_req/InputIterator">Input Iterator</a>.
|
| 487 |
+
*/
|
| 488 |
+
template<typename InputIterator>
|
| 489 |
+
void assign(InputIterator first, InputIterator last);
|
| 490 |
+
|
| 491 |
+
/*! This method returns a copy of this vector's allocator.
|
| 492 |
+
* \return A copy of the alloctor used by this vector.
|
| 493 |
+
*/
|
| 494 |
+
allocator_type get_allocator(void) const;
|
| 495 |
+
#endif // end doxygen-only members
|
| 496 |
+
};
|
| 497 |
+
|
| 498 |
+
/*! Exchanges the values of two vectors.
|
| 499 |
+
* \p x The first \p device_vector of interest.
|
| 500 |
+
* \p y The second \p device_vector of interest.
|
| 501 |
+
*/
|
| 502 |
+
template<typename T, typename Alloc>
|
| 503 |
+
void swap(device_vector<T,Alloc> &a, device_vector<T,Alloc> &b)
|
| 504 |
+
{
|
| 505 |
+
a.swap(b);
|
| 506 |
+
}
|
| 507 |
+
|
| 508 |
+
/*! \} // containres
|
| 509 |
+
*/
|
| 510 |
+
|
| 511 |
+
THRUST_NAMESPACE_END
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/equal.h
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
/*! \file equal.h
|
| 19 |
+
* \brief Equality between ranges
|
| 20 |
+
*/
|
| 21 |
+
|
| 22 |
+
#pragma once
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/config.h>
|
| 25 |
+
#include <thrust/detail/execution_policy.h>
|
| 26 |
+
|
| 27 |
+
THRUST_NAMESPACE_BEGIN
|
| 28 |
+
|
| 29 |
+
/*! \addtogroup reductions
|
| 30 |
+
* \{
|
| 31 |
+
* \addtogroup comparisons
|
| 32 |
+
* \ingroup reductions
|
| 33 |
+
* \{
|
| 34 |
+
*/
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
/*! \p equal returns \c true if the two ranges <tt>[first1, last1)</tt>
|
| 38 |
+
* and <tt>[first2, first2 + (last1 - first1))</tt> are identical when
|
| 39 |
+
* compared element-by-element, and otherwise returns \c false.
|
| 40 |
+
*
|
| 41 |
+
* This version of \p equal returns \c true if and only if for every
|
| 42 |
+
* iterator \c i in <tt>[first1, last1)</tt>, <tt>*i == *(first2 + (i - first1))</tt>.
|
| 43 |
+
*
|
| 44 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 45 |
+
*
|
| 46 |
+
* \param exec The execution policy to use for parallelization.
|
| 47 |
+
* \param first1 The beginning of the first sequence.
|
| 48 |
+
* \param last1 The end of the first sequence.
|
| 49 |
+
* \param first2 The beginning of the second sequence.
|
| 50 |
+
* \return \c true, if the sequences are equal; \c false, otherwise.
|
| 51 |
+
*
|
| 52 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 53 |
+
* \tparam InputIterator1 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 54 |
+
* and \p InputIterator1's \c value_type is a model of <a href="https://en.cppreference.com/w/cpp/concepts/equality_comparable">Equality Comparable</a>,
|
| 55 |
+
* and \p InputIterator1's \c value_type can be compared for equality with \c InputIterator2's \c value_type.
|
| 56 |
+
* \tparam InputIterator2 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 57 |
+
* and \p InputIterator2's \c value_type is a model of <a href="https://en.cppreference.com/w/cpp/concepts/equality_comparable">Equality Comparable</a>,
|
| 58 |
+
* and \p InputIterator2's \c value_type can be compared for equality with \c InputIterator1's \c value_type.
|
| 59 |
+
*
|
| 60 |
+
* The following code snippet demonstrates how to use \p equal to test
|
| 61 |
+
* two ranges for equality using the \p thrust::host execution policy:
|
| 62 |
+
*
|
| 63 |
+
* \code
|
| 64 |
+
* #include <thrust/equal.h>
|
| 65 |
+
* #include <thrust/execution_policy.h>
|
| 66 |
+
* ...
|
| 67 |
+
* int A1[7] = {3, 1, 4, 1, 5, 9, 3};
|
| 68 |
+
* int A2[7] = {3, 1, 4, 2, 8, 5, 7};
|
| 69 |
+
* ...
|
| 70 |
+
* bool result = thrust::equal(thrust::host, A1, A1 + 7, A2);
|
| 71 |
+
*
|
| 72 |
+
* // result == false
|
| 73 |
+
* \endcode
|
| 74 |
+
*
|
| 75 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/equal
|
| 76 |
+
*/
|
| 77 |
+
template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2>
|
| 78 |
+
__host__ __device__
|
| 79 |
+
bool equal(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, InputIterator1 first1, InputIterator1 last1, InputIterator2 first2);
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
/*! \p equal returns \c true if the two ranges <tt>[first1, last1)</tt>
|
| 83 |
+
* and <tt>[first2, first2 + (last1 - first1))</tt> are identical when
|
| 84 |
+
* compared element-by-element, and otherwise returns \c false.
|
| 85 |
+
*
|
| 86 |
+
* This version of \p equal returns \c true if and only if for every
|
| 87 |
+
* iterator \c i in <tt>[first1, last1)</tt>, <tt>*i == *(first2 + (i - first1))</tt>.
|
| 88 |
+
*
|
| 89 |
+
* \param first1 The beginning of the first sequence.
|
| 90 |
+
* \param last1 The end of the first sequence.
|
| 91 |
+
* \param first2 The beginning of the second sequence.
|
| 92 |
+
* \return \c true, if the sequences are equal; \c false, otherwise.
|
| 93 |
+
*
|
| 94 |
+
* \tparam InputIterator1 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 95 |
+
* and \p InputIterator1's \c value_type is a model of <a href="https://en.cppreference.com/w/cpp/concepts/equality_comparable">Equality Comparable</a>,
|
| 96 |
+
* and \p InputIterator1's \c value_type can be compared for equality with \c InputIterator2's \c value_type.
|
| 97 |
+
* \tparam InputIterator2 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 98 |
+
* and \p InputIterator2's \c value_type is a model of <a href="https://en.cppreference.com/w/cpp/concepts/equality_comparable">Equality Comparable</a>,
|
| 99 |
+
* and \p InputIterator2's \c value_type can be compared for equality with \c InputIterator1's \c value_type.
|
| 100 |
+
*
|
| 101 |
+
* The following code snippet demonstrates how to use \p equal to test
|
| 102 |
+
* two ranges for equality.
|
| 103 |
+
*
|
| 104 |
+
* \code
|
| 105 |
+
* #include <thrust/equal.h>
|
| 106 |
+
* ...
|
| 107 |
+
* int A1[7] = {3, 1, 4, 1, 5, 9, 3};
|
| 108 |
+
* int A2[7] = {3, 1, 4, 2, 8, 5, 7};
|
| 109 |
+
* ...
|
| 110 |
+
* bool result = thrust::equal(A1, A1 + 7, A2);
|
| 111 |
+
*
|
| 112 |
+
* // result == false
|
| 113 |
+
* \endcode
|
| 114 |
+
*
|
| 115 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/equal
|
| 116 |
+
*/
|
| 117 |
+
template <typename InputIterator1, typename InputIterator2>
|
| 118 |
+
bool equal(InputIterator1 first1, InputIterator1 last1,
|
| 119 |
+
InputIterator2 first2);
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
/*! \p equal returns \c true if the two ranges <tt>[first1, last1)</tt>
|
| 123 |
+
* and <tt>[first2, first2 + (last1 - first1))</tt> are identical when
|
| 124 |
+
* compared element-by-element, and otherwise returns \c false.
|
| 125 |
+
*
|
| 126 |
+
* This version of \p equal returns \c true if and only if for every
|
| 127 |
+
* iterator \c i in <tt>[first1, last1)</tt>,
|
| 128 |
+
* <tt>binary_pred(*i, *(first2 + (i - first1)))</tt> is \c true.
|
| 129 |
+
*
|
| 130 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 131 |
+
*
|
| 132 |
+
* \param exec The execution policy to use for parallelization.
|
| 133 |
+
* \param first1 The beginning of the first sequence.
|
| 134 |
+
* \param last1 The end of the first sequence.
|
| 135 |
+
* \param first2 The beginning of the second sequence.
|
| 136 |
+
* \param binary_pred Binary predicate used to test element equality.
|
| 137 |
+
* \return \c true, if the sequences are equal; \c false, otherwise.
|
| 138 |
+
*
|
| 139 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 140 |
+
* \tparam InputIterator1 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 141 |
+
* and \p InputIterator1's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type.
|
| 142 |
+
* \tparam InputIterator2 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 143 |
+
* and \p InputIterator2's \c value_type is convertible to \p BinaryPredicate's \c second_argument_type.
|
| 144 |
+
* \tparam BinaryPredicate is a model of <a href="https://en.cppreference.com/w/cpp/named_req/BinaryPredicate">Binary Predicate</a>.
|
| 145 |
+
*
|
| 146 |
+
* The following code snippet demonstrates how to use \p equal to compare the
|
| 147 |
+
* elements in two ranges modulo 2 using the \p thrust::host execution policy.
|
| 148 |
+
*
|
| 149 |
+
* \code
|
| 150 |
+
* #include <thrust/equal.h>
|
| 151 |
+
* #include <thrust/execution_policy.h>
|
| 152 |
+
* ...
|
| 153 |
+
*
|
| 154 |
+
* struct compare_modulo_two
|
| 155 |
+
* {
|
| 156 |
+
* __host__ __device__
|
| 157 |
+
* bool operator()(int x, int y) const
|
| 158 |
+
* {
|
| 159 |
+
* return (x % 2) == (y % 2);
|
| 160 |
+
* }
|
| 161 |
+
* };
|
| 162 |
+
* ...
|
| 163 |
+
* int x[6] = {0, 2, 4, 6, 8, 10};
|
| 164 |
+
* int y[6] = {1, 3, 5, 7, 9, 11};
|
| 165 |
+
*
|
| 166 |
+
* bool result = thrust::equal(x, x + 6, y, compare_modulo_two());
|
| 167 |
+
*
|
| 168 |
+
* // result is false
|
| 169 |
+
* \endcode
|
| 170 |
+
*
|
| 171 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/equal
|
| 172 |
+
*/
|
| 173 |
+
template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
|
| 174 |
+
__host__ __device__
|
| 175 |
+
bool equal(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, BinaryPredicate binary_pred);
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
/*! \p equal returns \c true if the two ranges <tt>[first1, last1)</tt>
|
| 179 |
+
* and <tt>[first2, first2 + (last1 - first1))</tt> are identical when
|
| 180 |
+
* compared element-by-element, and otherwise returns \c false.
|
| 181 |
+
*
|
| 182 |
+
* This version of \p equal returns \c true if and only if for every
|
| 183 |
+
* iterator \c i in <tt>[first1, last1)</tt>,
|
| 184 |
+
* <tt>binary_pred(*i, *(first2 + (i - first1)))</tt> is \c true.
|
| 185 |
+
*
|
| 186 |
+
* \param first1 The beginning of the first sequence.
|
| 187 |
+
* \param last1 The end of the first sequence.
|
| 188 |
+
* \param first2 The beginning of the second sequence.
|
| 189 |
+
* \param binary_pred Binary predicate used to test element equality.
|
| 190 |
+
* \return \c true, if the sequences are equal; \c false, otherwise.
|
| 191 |
+
*
|
| 192 |
+
* \tparam InputIterator1 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 193 |
+
* and \p InputIterator1's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type.
|
| 194 |
+
* \tparam InputIterator2 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 195 |
+
* and \p InputIterator2's \c value_type is convertible to \p BinaryPredicate's \c second_argument_type.
|
| 196 |
+
* \tparam BinaryPredicate is a model of <a href="https://en.cppreference.com/w/cpp/named_req/BinaryPredicate">Binary Predicate</a>.
|
| 197 |
+
*
|
| 198 |
+
* The following code snippet demonstrates how to use \p equal to compare the
|
| 199 |
+
* elements in two ranges modulo 2.
|
| 200 |
+
*
|
| 201 |
+
* \code
|
| 202 |
+
* #include <thrust/equal.h>
|
| 203 |
+
*
|
| 204 |
+
* struct compare_modulo_two
|
| 205 |
+
* {
|
| 206 |
+
* __host__ __device__
|
| 207 |
+
* bool operator()(int x, int y) const
|
| 208 |
+
* {
|
| 209 |
+
* return (x % 2) == (y % 2);
|
| 210 |
+
* }
|
| 211 |
+
* };
|
| 212 |
+
* ...
|
| 213 |
+
* int x[6] = {0, 2, 4, 6, 8, 10};
|
| 214 |
+
* int y[6] = {1, 3, 5, 7, 9, 11};
|
| 215 |
+
*
|
| 216 |
+
* bool result = thrust::equal(x, x + 5, y, compare_modulo_two());
|
| 217 |
+
*
|
| 218 |
+
* // result is true
|
| 219 |
+
* \endcode
|
| 220 |
+
*
|
| 221 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/equal
|
| 222 |
+
*/
|
| 223 |
+
template <typename InputIterator1, typename InputIterator2,
|
| 224 |
+
typename BinaryPredicate>
|
| 225 |
+
bool equal(InputIterator1 first1, InputIterator1 last1,
|
| 226 |
+
InputIterator2 first2, BinaryPredicate binary_pred);
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
/*! \} // end comparisons
|
| 230 |
+
* \} // end reductions
|
| 231 |
+
*/
|
| 232 |
+
|
| 233 |
+
THRUST_NAMESPACE_END
|
| 234 |
+
|
| 235 |
+
#include <thrust/detail/equal.inl>
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/event.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
/*! \file thrust/event.h
|
| 18 |
+
* \brief `thrust::event`, an asynchronous handle type.
|
| 19 |
+
*/
|
| 20 |
+
|
| 21 |
+
#pragma once
|
| 22 |
+
|
| 23 |
+
#include <thrust/future.h>
|
| 24 |
+
|
| 25 |
+
// TODO: Actually separate `<thrust/future.h>` into two headers.
|
| 26 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/execution_policy.h
ADDED
|
@@ -0,0 +1,392 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
/*! \file thrust/execution_policy.h
|
| 18 |
+
* \brief Thrust execution policies.
|
| 19 |
+
*/
|
| 20 |
+
|
| 21 |
+
#pragma once
|
| 22 |
+
|
| 23 |
+
#include <thrust/detail/config.h>
|
| 24 |
+
#include <thrust/detail/execution_policy.h>
|
| 25 |
+
#include <thrust/detail/execute_with_allocator.h>
|
| 26 |
+
#include <thrust/detail/seq.h>
|
| 27 |
+
|
| 28 |
+
//! \cond
|
| 29 |
+
|
| 30 |
+
// #include the host system's execution_policy header
|
| 31 |
+
#define __THRUST_HOST_SYSTEM_EXECUTION_POLICY_HEADER <__THRUST_HOST_SYSTEM_ROOT/execution_policy.h>
|
| 32 |
+
#include __THRUST_HOST_SYSTEM_EXECUTION_POLICY_HEADER
|
| 33 |
+
#undef __THRUST_HOST_SYSTEM_EXECUTION_POLICY_HEADER
|
| 34 |
+
|
| 35 |
+
// #include the device system's execution_policy.h header
|
| 36 |
+
#define __THRUST_DEVICE_SYSTEM_EXECUTION_POLICY_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/execution_policy.h>
|
| 37 |
+
#include __THRUST_DEVICE_SYSTEM_EXECUTION_POLICY_HEADER
|
| 38 |
+
#undef __THRUST_DEVICE_SYSTEM_EXECUTION_POLICY_HEADER
|
| 39 |
+
|
| 40 |
+
//! \endcond
|
| 41 |
+
|
| 42 |
+
THRUST_NAMESPACE_BEGIN
|
| 43 |
+
|
| 44 |
+
/*! \cond
|
| 45 |
+
*/
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
namespace detail
|
| 49 |
+
{
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
typedef thrust::system::__THRUST_HOST_SYSTEM_NAMESPACE::detail::par_t host_t;
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
typedef thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::detail::par_t device_t;
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
} // end detail
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
/*! \endcond
|
| 62 |
+
*/
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
/*! \addtogroup execution_policies Parallel Execution Policies
|
| 66 |
+
* \{
|
| 67 |
+
*/
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
// define execution_policy for the purpose of Doxygenating it
|
| 71 |
+
// it is actually defined elsewhere
|
| 72 |
+
#if 0
|
| 73 |
+
/*! \p execution_policy is the base class for all Thrust parallel execution policies
|
| 74 |
+
* like \p thrust::host, \p thrust::device, and each backend system's tag type.
|
| 75 |
+
*
|
| 76 |
+
* Custom user-defined backends should derive a policy from this type in order to
|
| 77 |
+
* interoperate with Thrust algorithm dispatch.
|
| 78 |
+
*
|
| 79 |
+
* The following code snippet demonstrates how to derive a standalone custom execution policy
|
| 80 |
+
* from \p thrust::execution_policy to implement a backend which only implements \p for_each:
|
| 81 |
+
*
|
| 82 |
+
* \code
|
| 83 |
+
* #include <thrust/execution_policy.h>
|
| 84 |
+
* #include <iostream>
|
| 85 |
+
*
|
| 86 |
+
* // define a type derived from thrust::execution_policy to distinguish our custom execution policy:
|
| 87 |
+
* struct my_policy : thrust::execution_policy<my_policy> {};
|
| 88 |
+
*
|
| 89 |
+
* // overload for_each on my_policy
|
| 90 |
+
* template<typename Iterator, typename Function>
|
| 91 |
+
* Iterator for_each(my_policy, Iterator first, Iterator last, Function f)
|
| 92 |
+
* {
|
| 93 |
+
* std::cout << "Hello, world from for_each(my_policy)!" << std::endl;
|
| 94 |
+
*
|
| 95 |
+
* for(; first < last; ++first)
|
| 96 |
+
* {
|
| 97 |
+
* f(*first);
|
| 98 |
+
* }
|
| 99 |
+
*
|
| 100 |
+
* return first;
|
| 101 |
+
* }
|
| 102 |
+
*
|
| 103 |
+
* struct ignore_argument
|
| 104 |
+
* {
|
| 105 |
+
* void operator()(int) {}
|
| 106 |
+
* };
|
| 107 |
+
*
|
| 108 |
+
* int main()
|
| 109 |
+
* {
|
| 110 |
+
* int data[4];
|
| 111 |
+
*
|
| 112 |
+
* // dispatch thrust::for_each using our custom policy:
|
| 113 |
+
* my_policy exec;
|
| 114 |
+
* thrust::for_each(exec, data, data + 4, ignore_argument());
|
| 115 |
+
*
|
| 116 |
+
* // can't dispatch thrust::transform because no overload exists for my_policy:
|
| 117 |
+
* //thrust::transform(exec, data, data, + 4, data, thrust::identity<int>()); // error!
|
| 118 |
+
*
|
| 119 |
+
* return 0;
|
| 120 |
+
* }
|
| 121 |
+
* \endcode
|
| 122 |
+
*
|
| 123 |
+
* \see host_execution_policy
|
| 124 |
+
* \see device_execution_policy
|
| 125 |
+
*/
|
| 126 |
+
template<typename DerivedPolicy>
|
| 127 |
+
struct execution_policy : thrust::detail::execution_policy_base<DerivedPolicy>
|
| 128 |
+
{};
|
| 129 |
+
#endif
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
/*! \p host_execution_policy is the base class for all Thrust parallel execution policies
|
| 133 |
+
* which are derived from Thrust's default host backend system configured with the \p THRUST_HOST_SYSTEM
|
| 134 |
+
* macro.
|
| 135 |
+
*
|
| 136 |
+
* Custom user-defined backends which wish to inherit the functionality of Thrust's host backend system
|
| 137 |
+
* should derive a policy from this type in order to interoperate with Thrust algorithm dispatch.
|
| 138 |
+
*
|
| 139 |
+
* The following code snippet demonstrates how to derive a standalone custom execution policy from
|
| 140 |
+
* \p thrust::host_execution_policy to implement a backend which specializes \p for_each while inheriting
|
| 141 |
+
* the behavior of every other algorithm from the host system:
|
| 142 |
+
*
|
| 143 |
+
* \code
|
| 144 |
+
* #include <thrust/execution_policy.h>
|
| 145 |
+
* #include <iostream>
|
| 146 |
+
*
|
| 147 |
+
* // define a type derived from thrust::host_execution_policy to distinguish our custom execution policy:
|
| 148 |
+
* struct my_policy : thrust::host_execution_policy<my_policy> {};
|
| 149 |
+
*
|
| 150 |
+
* // overload for_each on my_policy
|
| 151 |
+
* template<typename Iterator, typename Function>
|
| 152 |
+
* Iterator for_each(my_policy, Iterator first, Iterator last, Function f)
|
| 153 |
+
* {
|
| 154 |
+
* std::cout << "Hello, world from for_each(my_policy)!" << std::endl;
|
| 155 |
+
*
|
| 156 |
+
* for(; first < last; ++first)
|
| 157 |
+
* {
|
| 158 |
+
* f(*first);
|
| 159 |
+
* }
|
| 160 |
+
*
|
| 161 |
+
* return first;
|
| 162 |
+
* }
|
| 163 |
+
*
|
| 164 |
+
* struct ignore_argument
|
| 165 |
+
* {
|
| 166 |
+
* void operator()(int) {}
|
| 167 |
+
* };
|
| 168 |
+
*
|
| 169 |
+
* int main()
|
| 170 |
+
* {
|
| 171 |
+
* int data[4];
|
| 172 |
+
*
|
| 173 |
+
* // dispatch thrust::for_each using our custom policy:
|
| 174 |
+
* my_policy exec;
|
| 175 |
+
* thrust::for_each(exec, data, data + 4, ignore_argument());
|
| 176 |
+
*
|
| 177 |
+
* // dispatch thrust::transform whose behavior our policy inherits
|
| 178 |
+
* thrust::transform(exec, data, data, + 4, data, thrust::identity<int>());
|
| 179 |
+
*
|
| 180 |
+
* return 0;
|
| 181 |
+
* }
|
| 182 |
+
* \endcode
|
| 183 |
+
*
|
| 184 |
+
* \see execution_policy
|
| 185 |
+
* \see device_execution_policy
|
| 186 |
+
*/
|
| 187 |
+
template<typename DerivedPolicy>
|
| 188 |
+
struct host_execution_policy
|
| 189 |
+
: thrust::system::__THRUST_HOST_SYSTEM_NAMESPACE::execution_policy<DerivedPolicy>
|
| 190 |
+
{};
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
/*! \p device_execution_policy is the base class for all Thrust parallel execution policies
|
| 194 |
+
* which are derived from Thrust's default device backend system configured with the \p THRUST_DEVICE_SYSTEM
|
| 195 |
+
* macro.
|
| 196 |
+
*
|
| 197 |
+
* Custom user-defined backends which wish to inherit the functionality of Thrust's device backend system
|
| 198 |
+
* should derive a policy from this type in order to interoperate with Thrust algorithm dispatch.
|
| 199 |
+
*
|
| 200 |
+
* The following code snippet demonstrates how to derive a standalone custom execution policy from
|
| 201 |
+
* \p thrust::device_execution_policy to implement a backend which specializes \p for_each while inheriting
|
| 202 |
+
* the behavior of every other algorithm from the device system:
|
| 203 |
+
*
|
| 204 |
+
* \code
|
| 205 |
+
* #include <thrust/execution_policy.h>
|
| 206 |
+
* #include <iostream>
|
| 207 |
+
*
|
| 208 |
+
* // define a type derived from thrust::device_execution_policy to distinguish our custom execution policy:
|
| 209 |
+
* struct my_policy : thrust::device_execution_policy<my_policy> {};
|
| 210 |
+
*
|
| 211 |
+
* // overload for_each on my_policy
|
| 212 |
+
* template<typename Iterator, typename Function>
|
| 213 |
+
* Iterator for_each(my_policy, Iterator first, Iterator last, Function f)
|
| 214 |
+
* {
|
| 215 |
+
* std::cout << "Hello, world from for_each(my_policy)!" << std::endl;
|
| 216 |
+
*
|
| 217 |
+
* for(; first < last; ++first)
|
| 218 |
+
* {
|
| 219 |
+
* f(*first);
|
| 220 |
+
* }
|
| 221 |
+
*
|
| 222 |
+
* return first;
|
| 223 |
+
* }
|
| 224 |
+
*
|
| 225 |
+
* struct ignore_argument
|
| 226 |
+
* {
|
| 227 |
+
* void operator()(int) {}
|
| 228 |
+
* };
|
| 229 |
+
*
|
| 230 |
+
* int main()
|
| 231 |
+
* {
|
| 232 |
+
* int data[4];
|
| 233 |
+
*
|
| 234 |
+
* // dispatch thrust::for_each using our custom policy:
|
| 235 |
+
* my_policy exec;
|
| 236 |
+
* thrust::for_each(exec, data, data + 4, ignore_argument());
|
| 237 |
+
*
|
| 238 |
+
* // dispatch thrust::transform whose behavior our policy inherits
|
| 239 |
+
* thrust::transform(exec, data, data, + 4, data, thrust::identity<int>());
|
| 240 |
+
*
|
| 241 |
+
* return 0;
|
| 242 |
+
* }
|
| 243 |
+
* \endcode
|
| 244 |
+
*
|
| 245 |
+
* \see execution_policy
|
| 246 |
+
* \see host_execution_policy
|
| 247 |
+
*/
|
| 248 |
+
template<typename DerivedPolicy>
|
| 249 |
+
struct device_execution_policy
|
| 250 |
+
: thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::execution_policy<DerivedPolicy>
|
| 251 |
+
{};
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
/*! \p thrust::host is the default parallel execution policy associated with Thrust's host backend system
|
| 255 |
+
* configured by the \p THRUST_HOST_SYSTEM macro.
|
| 256 |
+
*
|
| 257 |
+
* Instead of relying on implicit algorithm dispatch through iterator system tags, users may directly target
|
| 258 |
+
* algorithm dispatch at Thrust's host system by providing \p thrust::host as an algorithm parameter.
|
| 259 |
+
*
|
| 260 |
+
* Explicit dispatch can be useful in avoiding the introduction of data copies into containers such as
|
| 261 |
+
* \p thrust::host_vector.
|
| 262 |
+
*
|
| 263 |
+
* Note that even though \p thrust::host targets the host CPU, it is a parallel execution policy. That is,
|
| 264 |
+
* the order that an algorithm invokes functors or dereferences iterators is not defined.
|
| 265 |
+
*
|
| 266 |
+
* The type of \p thrust::host is implementation-defined.
|
| 267 |
+
*
|
| 268 |
+
* The following code snippet demonstrates how to use \p thrust::host to explicitly dispatch an invocation
|
| 269 |
+
* of \p thrust::for_each to the host backend system:
|
| 270 |
+
*
|
| 271 |
+
* \code
|
| 272 |
+
* #include <thrust/for_each.h>
|
| 273 |
+
* #include <thrust/execution_policy.h>
|
| 274 |
+
* #include <cstdio>
|
| 275 |
+
*
|
| 276 |
+
* struct printf_functor
|
| 277 |
+
* {
|
| 278 |
+
* __host__ __device__
|
| 279 |
+
* void operator()(int x)
|
| 280 |
+
* {
|
| 281 |
+
* printf("%d\n", x);
|
| 282 |
+
* }
|
| 283 |
+
* };
|
| 284 |
+
* ...
|
| 285 |
+
* int vec[] = { 0, 1, 2 };
|
| 286 |
+
*
|
| 287 |
+
* thrust::for_each(thrust::host, vec, vec + 3, printf_functor());
|
| 288 |
+
*
|
| 289 |
+
* // 0 1 2 is printed to standard output in some unspecified order
|
| 290 |
+
* \endcode
|
| 291 |
+
*
|
| 292 |
+
* \see host_execution_policy
|
| 293 |
+
* \see thrust::device
|
| 294 |
+
*/
|
| 295 |
+
static const detail::host_t host;
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
/*! \p thrust::device is the default parallel execution policy associated with Thrust's device backend system
|
| 299 |
+
* configured by the \p THRUST_DEVICE_SYSTEM macro.
|
| 300 |
+
*
|
| 301 |
+
* Instead of relying on implicit algorithm dispatch through iterator system tags, users may directly target
|
| 302 |
+
* algorithm dispatch at Thrust's device system by providing \p thrust::device as an algorithm parameter.
|
| 303 |
+
*
|
| 304 |
+
* Explicit dispatch can be useful in avoiding the introduction of data copies into containers such as
|
| 305 |
+
* \p thrust::device_vector or to avoid wrapping e.g. raw pointers allocated by the CUDA API with types
|
| 306 |
+
* such as \p thrust::device_ptr.
|
| 307 |
+
*
|
| 308 |
+
* The user must take care to guarantee that the iterators provided to an algorithm are compatible with
|
| 309 |
+
* the device backend system. For example, raw pointers allocated by <tt>std::malloc</tt> typically
|
| 310 |
+
* cannot be dereferenced by a GPU. For this reason, raw pointers allocated by host APIs should not be mixed
|
| 311 |
+
* with a \p thrust::device algorithm invocation when the device backend is CUDA.
|
| 312 |
+
*
|
| 313 |
+
* The type of \p thrust::device is implementation-defined.
|
| 314 |
+
*
|
| 315 |
+
* The following code snippet demonstrates how to use \p thrust::device to explicitly dispatch an invocation
|
| 316 |
+
* of \p thrust::for_each to the device backend system:
|
| 317 |
+
*
|
| 318 |
+
* \code
|
| 319 |
+
* #include <thrust/for_each.h>
|
| 320 |
+
* #include <thrust/device_vector.h>
|
| 321 |
+
* #include <thrust/execution_policy.h>
|
| 322 |
+
* #include <cstdio>
|
| 323 |
+
*
|
| 324 |
+
* struct printf_functor
|
| 325 |
+
* {
|
| 326 |
+
* __host__ __device__
|
| 327 |
+
* void operator()(int x)
|
| 328 |
+
* {
|
| 329 |
+
* printf("%d\n", x);
|
| 330 |
+
* }
|
| 331 |
+
* };
|
| 332 |
+
* ...
|
| 333 |
+
* thrust::device_vector<int> vec(3);
|
| 334 |
+
* vec[0] = 0; vec[1] = 1; vec[2] = 2;
|
| 335 |
+
*
|
| 336 |
+
* thrust::for_each(thrust::device, vec.begin(), vec.end(), printf_functor());
|
| 337 |
+
*
|
| 338 |
+
* // 0 1 2 is printed to standard output in some unspecified order
|
| 339 |
+
* \endcode
|
| 340 |
+
*
|
| 341 |
+
* \see host_execution_policy
|
| 342 |
+
* \see thrust::device
|
| 343 |
+
*/
|
| 344 |
+
THRUST_INLINE_CONSTANT detail::device_t device;
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
// define seq for the purpose of Doxygenating it
|
| 348 |
+
// it is actually defined elsewhere
|
| 349 |
+
#if 0
|
| 350 |
+
/*! \p thrust::seq is an execution policy which requires an algorithm invocation to execute sequentially
|
| 351 |
+
* in the current thread. It can not be configured by a compile-time macro.
|
| 352 |
+
*
|
| 353 |
+
* The type of \p thrust::seq is implementation-defined.
|
| 354 |
+
*
|
| 355 |
+
* The following code snippet demonstrates how to use \p thrust::seq to explicitly execute an invocation
|
| 356 |
+
* of \p thrust::for_each sequentially:
|
| 357 |
+
*
|
| 358 |
+
* \code
|
| 359 |
+
* #include <thrust/for_each.h>
|
| 360 |
+
* #include <thrust/execution_policy.h>
|
| 361 |
+
* #include <vector>
|
| 362 |
+
* #include <cstdio>
|
| 363 |
+
*
|
| 364 |
+
* struct printf_functor
|
| 365 |
+
* {
|
| 366 |
+
* __host__ __device__
|
| 367 |
+
* void operator()(int x)
|
| 368 |
+
* {
|
| 369 |
+
* printf("%d\n", x);
|
| 370 |
+
* }
|
| 371 |
+
* };
|
| 372 |
+
* ...
|
| 373 |
+
* std::vector<int> vec(3);
|
| 374 |
+
* vec[0] = 0; vec[1] = 1; vec[2] = 2;
|
| 375 |
+
*
|
| 376 |
+
* thrust::for_each(thrust::seq, vec.begin(), vec.end(), printf_functor());
|
| 377 |
+
*
|
| 378 |
+
* // 0 1 2 is printed to standard output in sequential order
|
| 379 |
+
* \endcode
|
| 380 |
+
*
|
| 381 |
+
* \see thrust::host
|
| 382 |
+
* \see thrust::device
|
| 383 |
+
*/
|
| 384 |
+
static const detail::seq_t seq;
|
| 385 |
+
#endif
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
/*! \}
|
| 389 |
+
*/
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
THRUST_NAMESPACE_END
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/extrema.h
ADDED
|
@@ -0,0 +1,801 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
/*! \file extrema.h
|
| 18 |
+
* \brief Functions for computing computing extremal values
|
| 19 |
+
*/
|
| 20 |
+
|
| 21 |
+
#pragma once
|
| 22 |
+
|
| 23 |
+
#include <thrust/detail/config.h>
|
| 24 |
+
#include <thrust/detail/execution_policy.h>
|
| 25 |
+
#include <thrust/pair.h>
|
| 26 |
+
|
| 27 |
+
THRUST_NAMESPACE_BEGIN
|
| 28 |
+
|
| 29 |
+
/*! This version of \p min returns the smaller of two values, given a comparison operation.
|
| 30 |
+
* \param lhs The first value to compare.
|
| 31 |
+
* \param rhs The second value to compare.
|
| 32 |
+
* \param comp A comparison operation.
|
| 33 |
+
* \return The smaller element.
|
| 34 |
+
*
|
| 35 |
+
* \tparam T is convertible to \p BinaryPredicate's first argument type and to its second argument type.
|
| 36 |
+
* \tparam BinaryPredicate is a model of <a href="https://en.cppreference.com/w/cpp/named_req/BinaryPredicate">BinaryPredicate</a>.
|
| 37 |
+
*
|
| 38 |
+
* The following code snippet demonstrates how to use \p min to compute the smaller of two
|
| 39 |
+
* key-value objects.
|
| 40 |
+
*
|
| 41 |
+
* \code
|
| 42 |
+
* #include <thrust/extrema.h>
|
| 43 |
+
* ...
|
| 44 |
+
* struct key_value
|
| 45 |
+
* {
|
| 46 |
+
* int key;
|
| 47 |
+
* int value;
|
| 48 |
+
* };
|
| 49 |
+
*
|
| 50 |
+
* struct compare_key_value
|
| 51 |
+
* {
|
| 52 |
+
* __host__ __device__
|
| 53 |
+
* bool operator()(key_value lhs, key_value rhs)
|
| 54 |
+
* {
|
| 55 |
+
* return lhs.key < rhs.key;
|
| 56 |
+
* }
|
| 57 |
+
* };
|
| 58 |
+
*
|
| 59 |
+
* ...
|
| 60 |
+
* key_value a = {13, 0};
|
| 61 |
+
 *  key_value b = { 7, 1};
|
| 62 |
+
*
|
| 63 |
+
* key_value smaller = thrust::min(a, b, compare_key_value());
|
| 64 |
+
*
|
| 65 |
+
* // smaller is {7, 1}
|
| 66 |
+
* \endcode
|
| 67 |
+
*
|
| 68 |
+
* \note Returns the first argument when the arguments are equivalent.
|
| 69 |
+
* \see max
|
| 70 |
+
*/
|
| 71 |
+
template<typename T, typename BinaryPredicate>
|
| 72 |
+
__host__ __device__
|
| 73 |
+
T min THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs, BinaryPredicate comp);
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
/*! This version of \p min returns the smaller of two values.
|
| 77 |
+
* \param lhs The first value to compare.
|
| 78 |
+
* \param rhs The second value to compare.
|
| 79 |
+
* \return The smaller element.
|
| 80 |
+
*
|
| 81 |
+
* \tparam T is a model of <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThan Comparable</a>.
|
| 82 |
+
*
|
| 83 |
+
* The following code snippet demonstrates how to use \p min to compute the smaller of two
|
| 84 |
+
* integers.
|
| 85 |
+
*
|
| 86 |
+
* \code
|
| 87 |
+
* #include <thrust/extrema.h>
|
| 88 |
+
* ...
|
| 89 |
+
* int a = 13;
|
| 90 |
+
* int b = 7;
|
| 91 |
+
*
|
| 92 |
+
* int smaller = thrust::min(a, b);
|
| 93 |
+
*
|
| 94 |
+
* // smaller is 7
|
| 95 |
+
* \endcode
|
| 96 |
+
*
|
| 97 |
+
* \note Returns the first argument when the arguments are equivalent.
|
| 98 |
+
* \see max
|
| 99 |
+
*/
|
| 100 |
+
template<typename T>
|
| 101 |
+
__host__ __device__
|
| 102 |
+
T min THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs);
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
/*! This version of \p max returns the larger of two values, given a comparison operation.
|
| 106 |
+
* \param lhs The first value to compare.
|
| 107 |
+
* \param rhs The second value to compare.
|
| 108 |
+
* \param comp A comparison operation.
|
| 109 |
+
* \return The larger element.
|
| 110 |
+
*
|
| 111 |
+
* \tparam T is convertible to \p BinaryPredicate's first argument type and to its second argument type.
|
| 112 |
+
* \tparam BinaryPredicate is a model of <a href="https://en.cppreference.com/w/cpp/named_req/BinaryPredicate">BinaryPredicate</a>.
|
| 113 |
+
*
|
| 114 |
+
* The following code snippet demonstrates how to use \p max to compute the larger of two
|
| 115 |
+
* key-value objects.
|
| 116 |
+
*
|
| 117 |
+
* \code
|
| 118 |
+
* #include <thrust/extrema.h>
|
| 119 |
+
* ...
|
| 120 |
+
* struct key_value
|
| 121 |
+
* {
|
| 122 |
+
* int key;
|
| 123 |
+
* int value;
|
| 124 |
+
* };
|
| 125 |
+
*
|
| 126 |
+
* struct compare_key_value
|
| 127 |
+
* {
|
| 128 |
+
* __host__ __device__
|
| 129 |
+
* bool operator()(key_value lhs, key_value rhs)
|
| 130 |
+
* {
|
| 131 |
+
* return lhs.key < rhs.key;
|
| 132 |
+
* }
|
| 133 |
+
* };
|
| 134 |
+
*
|
| 135 |
+
* ...
|
| 136 |
+
* key_value a = {13, 0};
|
| 137 |
+
 *  key_value b = { 7, 1};
|
| 138 |
+
*
|
| 139 |
+
* key_value larger = thrust::max(a, b, compare_key_value());
|
| 140 |
+
*
|
| 141 |
+
* // larger is {13, 0}
|
| 142 |
+
* \endcode
|
| 143 |
+
*
|
| 144 |
+
* \note Returns the first argument when the arguments are equivalent.
|
| 145 |
+
* \see min
|
| 146 |
+
*/
|
| 147 |
+
template<typename T, typename BinaryPredicate>
|
| 148 |
+
__host__ __device__
|
| 149 |
+
T max THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs, BinaryPredicate comp);
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
/*! This version of \p max returns the larger of two values.
|
| 153 |
+
* \param lhs The first value to compare.
|
| 154 |
+
* \param rhs The second value to compare.
|
| 155 |
+
* \return The larger element.
|
| 156 |
+
*
|
| 157 |
+
* \tparam T is a model of <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThan Comparable</a>.
|
| 158 |
+
*
|
| 159 |
+
* The following code snippet demonstrates how to use \p max to compute the larger of two
|
| 160 |
+
* integers.
|
| 161 |
+
*
|
| 162 |
+
* \code
|
| 163 |
+
* #include <thrust/extrema.h>
|
| 164 |
+
* ...
|
| 165 |
+
* int a = 13;
|
| 166 |
+
* int b = 7;
|
| 167 |
+
*
|
| 168 |
+
 *  int larger = thrust::max(a, b);
|
| 169 |
+
*
|
| 170 |
+
* // larger is 13
|
| 171 |
+
* \endcode
|
| 172 |
+
*
|
| 173 |
+
* \note Returns the first argument when the arguments are equivalent.
|
| 174 |
+
* \see min
|
| 175 |
+
*/
|
| 176 |
+
template<typename T>
|
| 177 |
+
__host__ __device__
|
| 178 |
+
T max THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs);
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
/*! \addtogroup reductions
|
| 182 |
+
* \{
|
| 183 |
+
* \addtogroup extrema
|
| 184 |
+
* \ingroup reductions
|
| 185 |
+
* \{
|
| 186 |
+
*/
|
| 187 |
+
|
| 188 |
+
/*! \p min_element finds the smallest element in the range <tt>[first, last)</tt>.
|
| 189 |
+
* It returns the first iterator \c i in <tt>[first, last)</tt>
|
| 190 |
+
* such that no other iterator in <tt>[first, last)</tt> points to a value smaller
|
| 191 |
+
* than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
|
| 192 |
+
* empty range.
|
| 193 |
+
*
|
| 194 |
+
* The two versions of \p min_element differ in how they define whether one element is
|
| 195 |
+
* less than another. This version compares objects using \c operator<. Specifically,
|
| 196 |
+
* this version of \p min_element returns the first iterator \c i in <tt>[first, last)</tt>
|
| 197 |
+
* such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>*j < *i</tt> is
|
| 198 |
+
* \c false.
|
| 199 |
+
*
|
| 200 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 201 |
+
*
|
| 202 |
+
* \param exec The execution policy to use for parallelization.
|
| 203 |
+
* \param first The beginning of the sequence.
|
| 204 |
+
* \param last The end of the sequence.
|
| 205 |
+
* \return An iterator pointing to the smallest element of the range <tt>[first, last)</tt>,
|
| 206 |
+
* if it is not an empty range; \p last, otherwise.
|
| 207 |
+
*
|
| 208 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 209 |
+
* and \c ForwardIterator's \c value_type is a model of
|
| 210 |
+
* <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThan Comparable</a>.
|
| 211 |
+
*
|
| 212 |
+
* \code
|
| 213 |
+
* #include <thrust/extrema.h>
|
| 214 |
+
* #include <thrust/execution_policy.h>
|
| 215 |
+
* ...
|
| 216 |
+
* int data[6] = {1, 0, 2, 2, 1, 3};
|
| 217 |
+
* int *result = thrust::min_element(thrust::host, data, data + 6);
|
| 218 |
+
*
|
| 219 |
+
* // result is data + 1
|
| 220 |
+
* // *result is 0
|
| 221 |
+
* \endcode
|
| 222 |
+
*
|
| 223 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/min_element
|
| 224 |
+
*/
|
| 225 |
+
template<typename DerivedPolicy, typename ForwardIterator>
|
| 226 |
+
__host__ __device__
|
| 227 |
+
ForwardIterator min_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last);
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
/*! \p min_element finds the smallest element in the range <tt>[first, last)</tt>.
|
| 231 |
+
* It returns the first iterator \c i in <tt>[first, last)</tt>
|
| 232 |
+
* such that no other iterator in <tt>[first, last)</tt> points to a value smaller
|
| 233 |
+
* than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
|
| 234 |
+
* empty range.
|
| 235 |
+
*
|
| 236 |
+
* The two versions of \p min_element differ in how they define whether one element is
|
| 237 |
+
* less than another. This version compares objects using \c operator<. Specifically,
|
| 238 |
+
* this version of \p min_element returns the first iterator \c i in <tt>[first, last)</tt>
|
| 239 |
+
* such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>*j < *i</tt> is
|
| 240 |
+
* \c false.
|
| 241 |
+
*
|
| 242 |
+
* \param first The beginning of the sequence.
|
| 243 |
+
* \param last The end of the sequence.
|
| 244 |
+
* \return An iterator pointing to the smallest element of the range <tt>[first, last)</tt>,
|
| 245 |
+
* if it is not an empty range; \p last, otherwise.
|
| 246 |
+
*
|
| 247 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 248 |
+
* and \c ForwardIterator's \c value_type is a model of
|
| 249 |
+
* <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThan Comparable</a>.
|
| 250 |
+
*
|
| 251 |
+
* \code
|
| 252 |
+
* #include <thrust/extrema.h>
|
| 253 |
+
* ...
|
| 254 |
+
* int data[6] = {1, 0, 2, 2, 1, 3};
|
| 255 |
+
* int *result = thrust::min_element(data, data + 6);
|
| 256 |
+
*
|
| 257 |
+
* // result is data + 1
|
| 258 |
+
* // *result is 0
|
| 259 |
+
* \endcode
|
| 260 |
+
*
|
| 261 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/min_element
|
| 262 |
+
*/
|
| 263 |
+
template <typename ForwardIterator>
|
| 264 |
+
ForwardIterator min_element(ForwardIterator first, ForwardIterator last);
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
/*! \p min_element finds the smallest element in the range <tt>[first, last)</tt>.
|
| 268 |
+
* It returns the first iterator \c i in <tt>[first, last)</tt>
|
| 269 |
+
* such that no other iterator in <tt>[first, last)</tt> points to a value smaller
|
| 270 |
+
* than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
|
| 271 |
+
* empty range.
|
| 272 |
+
*
|
| 273 |
+
* The two versions of \p min_element differ in how they define whether one element is
|
| 274 |
+
* less than another. This version compares objects using a function object \p comp.
|
| 275 |
+
* Specifically, this version of \p min_element returns the first iterator \c i in <tt>[first, last)</tt>
|
| 276 |
+
* such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>comp(*j, *i)</tt> is
|
| 277 |
+
* \c false.
|
| 278 |
+
*
|
| 279 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 280 |
+
*
|
| 281 |
+
* \param exec The execution policy to use for parallelization.
|
| 282 |
+
* \param first The beginning of the sequence.
|
| 283 |
+
* \param last The end of the sequence.
|
| 284 |
+
* \param comp A binary predicate used for comparison.
|
| 285 |
+
* \return An iterator pointing to the smallest element of the range <tt>[first, last)</tt>,
|
| 286 |
+
* if it is not an empty range; \p last, otherwise.
|
| 287 |
+
*
|
| 288 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 289 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 290 |
+
* and \p ForwardIterator's \c value_type is convertible to both \p comp's
|
| 291 |
+
* \c first_argument_type and \c second_argument_type.
|
| 292 |
+
* \tparam BinaryPredicate is a model of <a href="https://en.cppreference.com/w/cpp/named_req/BinaryPredicate">Binary Predicate</a>.
|
| 293 |
+
*
|
| 294 |
+
* The following code snippet demonstrates how to use \p min_element to find the smallest element
|
| 295 |
+
* of a collection of key-value pairs using the \p thrust::host execution policy for parallelization:
|
| 296 |
+
*
|
| 297 |
+
* \code
|
| 298 |
+
* #include <thrust/extrema.h>
|
| 299 |
+
* #include <thrust/execution_policy.h>
|
| 300 |
+
* ...
|
| 301 |
+
*
|
| 302 |
+
* struct key_value
|
| 303 |
+
* {
|
| 304 |
+
* int key;
|
| 305 |
+
* int value;
|
| 306 |
+
* };
|
| 307 |
+
*
|
| 308 |
+
* struct compare_key_value
|
| 309 |
+
* {
|
| 310 |
+
* __host__ __device__
|
| 311 |
+
* bool operator()(key_value lhs, key_value rhs)
|
| 312 |
+
* {
|
| 313 |
+
* return lhs.key < rhs.key;
|
| 314 |
+
* }
|
| 315 |
+
* };
|
| 316 |
+
*
|
| 317 |
+
* ...
|
| 318 |
+
* key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
|
| 319 |
+
*
|
| 320 |
+
* key_value *smallest = thrust::min_element(thrust::host, data, data + 4, compare_key_value());
|
| 321 |
+
*
|
| 322 |
+
* // smallest == data + 1
|
| 323 |
+
* // *smallest == {0,7}
|
| 324 |
+
* \endcode
|
| 325 |
+
*
|
| 326 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/min_element
|
| 327 |
+
*/
|
| 328 |
+
template<typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
|
| 329 |
+
__host__ __device__
|
| 330 |
+
ForwardIterator min_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp);
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
/*! \p min_element finds the smallest element in the range <tt>[first, last)</tt>.
|
| 334 |
+
* It returns the first iterator \c i in <tt>[first, last)</tt>
|
| 335 |
+
* such that no other iterator in <tt>[first, last)</tt> points to a value smaller
|
| 336 |
+
* than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
|
| 337 |
+
* empty range.
|
| 338 |
+
*
|
| 339 |
+
* The two versions of \p min_element differ in how they define whether one element is
|
| 340 |
+
* less than another. This version compares objects using a function object \p comp.
|
| 341 |
+
* Specifically, this version of \p min_element returns the first iterator \c i in <tt>[first, last)</tt>
|
| 342 |
+
* such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>comp(*j, *i)</tt> is
|
| 343 |
+
* \c false.
|
| 344 |
+
*
|
| 345 |
+
* \param first The beginning of the sequence.
|
| 346 |
+
* \param last The end of the sequence.
|
| 347 |
+
* \param comp A binary predicate used for comparison.
|
| 348 |
+
* \return An iterator pointing to the smallest element of the range <tt>[first, last)</tt>,
|
| 349 |
+
* if it is not an empty range; \p last, otherwise.
|
| 350 |
+
*
|
| 351 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 352 |
+
* and \p ForwardIterator's \c value_type is convertible to both \p comp's
|
| 353 |
+
* \c first_argument_type and \c second_argument_type.
|
| 354 |
+
* \tparam BinaryPredicate is a model of <a href="https://en.cppreference.com/w/cpp/named_req/BinaryPredicate">Binary Predicate</a>.
|
| 355 |
+
*
|
| 356 |
+
* The following code snippet demonstrates how to use \p min_element to find the smallest element
|
| 357 |
+
* of a collection of key-value pairs.
|
| 358 |
+
*
|
| 359 |
+
* \code
|
| 360 |
+
* #include <thrust/extrema.h>
|
| 361 |
+
*
|
| 362 |
+
* struct key_value
|
| 363 |
+
* {
|
| 364 |
+
* int key;
|
| 365 |
+
* int value;
|
| 366 |
+
* };
|
| 367 |
+
*
|
| 368 |
+
* struct compare_key_value
|
| 369 |
+
* {
|
| 370 |
+
* __host__ __device__
|
| 371 |
+
* bool operator()(key_value lhs, key_value rhs)
|
| 372 |
+
* {
|
| 373 |
+
* return lhs.key < rhs.key;
|
| 374 |
+
* }
|
| 375 |
+
* };
|
| 376 |
+
*
|
| 377 |
+
* ...
|
| 378 |
+
* key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
|
| 379 |
+
*
|
| 380 |
+
* key_value *smallest = thrust::min_element(data, data + 4, compare_key_value());
|
| 381 |
+
*
|
| 382 |
+
* // smallest == data + 1
|
| 383 |
+
* // *smallest == {0,7}
|
| 384 |
+
* \endcode
|
| 385 |
+
*
|
| 386 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/min_element
|
| 387 |
+
*/
|
| 388 |
+
template <typename ForwardIterator, typename BinaryPredicate>
|
| 389 |
+
ForwardIterator min_element(ForwardIterator first, ForwardIterator last,
|
| 390 |
+
BinaryPredicate comp);
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
/*! \p max_element finds the largest element in the range <tt>[first, last)</tt>.
|
| 394 |
+
* It returns the first iterator \c i in <tt>[first, last)</tt>
|
| 395 |
+
* such that no other iterator in <tt>[first, last)</tt> points to a value larger
|
| 396 |
+
* than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
|
| 397 |
+
* empty range.
|
| 398 |
+
*
|
| 399 |
+
* The two versions of \p max_element differ in how they define whether one element is
|
| 400 |
+
* greater than another. This version compares objects using \c operator<. Specifically,
|
| 401 |
+
* this version of \p max_element returns the first iterator \c i in <tt>[first, last)</tt>
|
| 402 |
+
* such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>*i < *j</tt> is
|
| 403 |
+
* \c false.
|
| 404 |
+
*
|
| 405 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 406 |
+
*
|
| 407 |
+
* \param exec The execution policy to use for parallelization.
|
| 408 |
+
* \param first The beginning of the sequence.
|
| 409 |
+
* \param last The end of the sequence.
|
| 410 |
+
* \return An iterator pointing to the largest element of the range <tt>[first, last)</tt>,
|
| 411 |
+
* if it is not an empty range; \p last, otherwise.
|
| 412 |
+
*
|
| 413 |
+
 *  \tparam DerivedPolicy The name of the derived execution policy.
|
| 414 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 415 |
+
* and \c ForwardIterator's \c value_type is a model of
|
| 416 |
+
* <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThan Comparable</a>.
|
| 417 |
+
*
|
| 418 |
+
* \code
|
| 419 |
+
* #include <thrust/extrema.h>
|
| 420 |
+
* #include <thrust/execution_policy.h>
|
| 421 |
+
* ...
|
| 422 |
+
* int data[6] = {1, 0, 2, 2, 1, 3};
|
| 423 |
+
* int *result = thrust::max_element(thrust::host, data, data + 6);
|
| 424 |
+
*
|
| 425 |
+
* // *result == 3
|
| 426 |
+
* \endcode
|
| 427 |
+
*
|
| 428 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/max_element
|
| 429 |
+
*/
|
| 430 |
+
template<typename DerivedPolicy, typename ForwardIterator>
|
| 431 |
+
__host__ __device__
|
| 432 |
+
ForwardIterator max_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last);
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
/*! \p max_element finds the largest element in the range <tt>[first, last)</tt>.
|
| 436 |
+
* It returns the first iterator \c i in <tt>[first, last)</tt>
|
| 437 |
+
* such that no other iterator in <tt>[first, last)</tt> points to a value larger
|
| 438 |
+
* than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
|
| 439 |
+
* empty range.
|
| 440 |
+
*
|
| 441 |
+
* The two versions of \p max_element differ in how they define whether one element is
|
| 442 |
+
* greater than another. This version compares objects using \c operator<. Specifically,
|
| 443 |
+
* this version of \p max_element returns the first iterator \c i in <tt>[first, last)</tt>
|
| 444 |
+
* such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>*i < *j</tt> is
|
| 445 |
+
* \c false.
|
| 446 |
+
*
|
| 447 |
+
* \param first The beginning of the sequence.
|
| 448 |
+
* \param last The end of the sequence.
|
| 449 |
+
* \return An iterator pointing to the largest element of the range <tt>[first, last)</tt>,
|
| 450 |
+
* if it is not an empty range; \p last, otherwise.
|
| 451 |
+
*
|
| 452 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 453 |
+
* and \c ForwardIterator's \c value_type is a model of
|
| 454 |
+
* <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThan Comparable</a>.
|
| 455 |
+
*
|
| 456 |
+
* \code
|
| 457 |
+
* #include <thrust/extrema.h>
|
| 458 |
+
* ...
|
| 459 |
+
* int data[6] = {1, 0, 2, 2, 1, 3};
|
| 460 |
+
* int *result = thrust::max_element(data, data + 6);
|
| 461 |
+
*
|
| 462 |
+
* // *result == 3
|
| 463 |
+
* \endcode
|
| 464 |
+
*
|
| 465 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/max_element
|
| 466 |
+
*/
|
| 467 |
+
template <typename ForwardIterator>
|
| 468 |
+
ForwardIterator max_element(ForwardIterator first, ForwardIterator last);
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
/*! \p max_element finds the largest element in the range <tt>[first, last)</tt>.
|
| 472 |
+
* It returns the first iterator \c i in <tt>[first, last)</tt>
|
| 473 |
+
* such that no other iterator in <tt>[first, last)</tt> points to a value larger
|
| 474 |
+
* than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
|
| 475 |
+
* empty range.
|
| 476 |
+
*
|
| 477 |
+
* The two versions of \p max_element differ in how they define whether one element is
|
| 478 |
+
* less than another. This version compares objects using a function object \p comp.
|
| 479 |
+
* Specifically, this version of \p max_element returns the first iterator \c i in <tt>[first, last)</tt>
|
| 480 |
+
* such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>comp(*i, *j)</tt> is
|
| 481 |
+
* \c false.
|
| 482 |
+
*
|
| 483 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 484 |
+
*
|
| 485 |
+
* \param exec The execution policy to use for parallelization.
|
| 486 |
+
* \param first The beginning of the sequence.
|
| 487 |
+
* \param last The end of the sequence.
|
| 488 |
+
* \param comp A binary predicate used for comparison.
|
| 489 |
+
* \return An iterator pointing to the largest element of the range <tt>[first, last)</tt>,
|
| 490 |
+
* if it is not an empty range; \p last, otherwise.
|
| 491 |
+
*
|
| 492 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 493 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 494 |
+
* and \p ForwardIterator's \c value_type is convertible to both \p comp's
|
| 495 |
+
* \c first_argument_type and \c second_argument_type.
|
| 496 |
+
* \tparam BinaryPredicate is a model of <a href="https://en.cppreference.com/w/cpp/named_req/BinaryPredicate">Binary Predicate</a>.
|
| 497 |
+
*
|
| 498 |
+
* The following code snippet demonstrates how to use \p max_element to find the largest element
|
| 499 |
+
* of a collection of key-value pairs using the \p thrust::host execution policy for parallelization.
|
| 500 |
+
*
|
| 501 |
+
* \code
|
| 502 |
+
* #include <thrust/extrema.h>
|
| 503 |
+
* #include <thrust/execution_policy.h>
|
| 504 |
+
* ...
|
| 505 |
+
*
|
| 506 |
+
* struct key_value
|
| 507 |
+
* {
|
| 508 |
+
* int key;
|
| 509 |
+
* int value;
|
| 510 |
+
* };
|
| 511 |
+
*
|
| 512 |
+
* struct compare_key_value
|
| 513 |
+
* {
|
| 514 |
+
* __host__ __device__
|
| 515 |
+
* bool operator()(key_value lhs, key_value rhs)
|
| 516 |
+
* {
|
| 517 |
+
* return lhs.key < rhs.key;
|
| 518 |
+
* }
|
| 519 |
+
* };
|
| 520 |
+
*
|
| 521 |
+
* ...
|
| 522 |
+
* key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
|
| 523 |
+
*
|
| 524 |
+
* key_value *largest = thrust::max_element(thrust::host, data, data + 4, compare_key_value());
|
| 525 |
+
*
|
| 526 |
+
* // largest == data + 3
|
| 527 |
+
* // *largest == {6,1}
|
| 528 |
+
* \endcode
|
| 529 |
+
*
|
| 530 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/max_element
|
| 531 |
+
*/
|
| 532 |
+
template<typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
|
| 533 |
+
__host__ __device__
|
| 534 |
+
ForwardIterator max_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp);
|
| 535 |
+
|
| 536 |
+
|
| 537 |
+
/*! \p max_element finds the largest element in the range <tt>[first, last)</tt>.
|
| 538 |
+
* It returns the first iterator \c i in <tt>[first, last)</tt>
|
| 539 |
+
* such that no other iterator in <tt>[first, last)</tt> points to a value larger
|
| 540 |
+
* than \c *i. The return value is \p last if and only if <tt>[first, last)</tt> is an
|
| 541 |
+
* empty range.
|
| 542 |
+
*
|
| 543 |
+
* The two versions of \p max_element differ in how they define whether one element is
|
| 544 |
+
* less than another. This version compares objects using a function object \p comp.
|
| 545 |
+
* Specifically, this version of \p max_element returns the first iterator \c i in <tt>[first, last)</tt>
|
| 546 |
+
* such that, for every iterator \c j in <tt>[first, last)</tt>, <tt>comp(*i, *j)</tt> is
|
| 547 |
+
* \c false.
|
| 548 |
+
*
|
| 549 |
+
* \param first The beginning of the sequence.
|
| 550 |
+
* \param last The end of the sequence.
|
| 551 |
+
* \param comp A binary predicate used for comparison.
|
| 552 |
+
* \return An iterator pointing to the largest element of the range <tt>[first, last)</tt>,
|
| 553 |
+
* if it is not an empty range; \p last, otherwise.
|
| 554 |
+
*
|
| 555 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 556 |
+
* and \p ForwardIterator's \c value_type is convertible to both \p comp's
|
| 557 |
+
* \c first_argument_type and \c second_argument_type.
|
| 558 |
+
* \tparam BinaryPredicate is a model of <a href="https://en.cppreference.com/w/cpp/named_req/BinaryPredicate">Binary Predicate</a>.
|
| 559 |
+
*
|
| 560 |
+
* The following code snippet demonstrates how to use \p max_element to find the largest element
|
| 561 |
+
* of a collection of key-value pairs.
|
| 562 |
+
*
|
| 563 |
+
* \code
|
| 564 |
+
* #include <thrust/extrema.h>
|
| 565 |
+
*
|
| 566 |
+
* struct key_value
|
| 567 |
+
* {
|
| 568 |
+
* int key;
|
| 569 |
+
* int value;
|
| 570 |
+
* };
|
| 571 |
+
*
|
| 572 |
+
* struct compare_key_value
|
| 573 |
+
* {
|
| 574 |
+
* __host__ __device__
|
| 575 |
+
* bool operator()(key_value lhs, key_value rhs)
|
| 576 |
+
* {
|
| 577 |
+
* return lhs.key < rhs.key;
|
| 578 |
+
* }
|
| 579 |
+
* };
|
| 580 |
+
*
|
| 581 |
+
* ...
|
| 582 |
+
* key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
|
| 583 |
+
*
|
| 584 |
+
* key_value *largest = thrust::max_element(data, data + 4, compare_key_value());
|
| 585 |
+
*
|
| 586 |
+
* // largest == data + 3
|
| 587 |
+
* // *largest == {6,1}
|
| 588 |
+
* \endcode
|
| 589 |
+
*
|
| 590 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/max_element
|
| 591 |
+
*/
|
| 592 |
+
template <typename ForwardIterator, typename BinaryPredicate>
|
| 593 |
+
ForwardIterator max_element(ForwardIterator first, ForwardIterator last,
|
| 594 |
+
BinaryPredicate comp);
|
| 595 |
+
|
| 596 |
+
|
| 597 |
+
/*! \p minmax_element finds the smallest and largest elements in the range <tt>[first, last)</tt>.
|
| 598 |
+
* It returns a pair of iterators <tt>(imin, imax)</tt> where \c imin is the same iterator
|
| 599 |
+
* returned by \p min_element and \c imax is the same iterator returned by \p max_element.
|
| 600 |
+
* This function is potentially more efficient than separate calls to \p min_element and \p max_element.
|
| 601 |
+
*
|
| 602 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 603 |
+
*
|
| 604 |
+
* \param exec The execution policy to use for parallelization.
|
| 605 |
+
* \param first The beginning of the sequence.
|
| 606 |
+
* \param last The end of the sequence.
|
| 607 |
+
* \return A pair of iterator pointing to the smallest and largest elements of the range <tt>[first, last)</tt>,
|
| 608 |
+
* if it is not an empty range; \p last, otherwise.
|
| 609 |
+
*
|
| 610 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 611 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 612 |
+
* and \c ForwardIterator's \c value_type is a model of
|
| 613 |
+
* <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThan Comparable</a>.
|
| 614 |
+
*
|
| 615 |
+
* \code
|
| 616 |
+
* #include <thrust/extrema.h>
|
| 617 |
+
* #include <thrust/execution_policy.h>
|
| 618 |
+
* ...
|
| 619 |
+
* int data[6] = {1, 0, 2, 2, 1, 3};
|
| 620 |
+
* thrust::pair<int *, int *> result = thrust::minmax_element(thrust::host, data, data + 6);
|
| 621 |
+
*
|
| 622 |
+
* // result.first is data + 1
|
| 623 |
+
* // result.second is data + 5
|
| 624 |
+
* // *result.first is 0
|
| 625 |
+
* // *result.second is 3
|
| 626 |
+
* \endcode
|
| 627 |
+
*
|
| 628 |
+
* \see min_element
|
| 629 |
+
* \see max_element
|
| 630 |
+
* \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf
|
| 631 |
+
*/
|
| 632 |
+
template<typename DerivedPolicy, typename ForwardIterator>
|
| 633 |
+
__host__ __device__
|
| 634 |
+
thrust::pair<ForwardIterator,ForwardIterator> minmax_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last);
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
/*! \p minmax_element finds the smallest and largest elements in the range <tt>[first, last)</tt>.
|
| 638 |
+
* It returns a pair of iterators <tt>(imin, imax)</tt> where \c imin is the same iterator
|
| 639 |
+
* returned by \p min_element and \c imax is the same iterator returned by \p max_element.
|
| 640 |
+
* This function is potentially more efficient than separate calls to \p min_element and \p max_element.
|
| 641 |
+
*
|
| 642 |
+
* \param first The beginning of the sequence.
|
| 643 |
+
* \param last The end of the sequence.
|
| 644 |
+
* \return A pair of iterator pointing to the smallest and largest elements of the range <tt>[first, last)</tt>,
|
| 645 |
+
* if it is not an empty range; \p last, otherwise.
|
| 646 |
+
*
|
| 647 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 648 |
+
* and \c ForwardIterator's \c value_type is a model of
|
| 649 |
+
* <a href="https://en.cppreference.com/w/cpp/named_req/LessThanComparable">LessThan Comparable</a>.
|
| 650 |
+
*
|
| 651 |
+
* \code
|
| 652 |
+
* #include <thrust/extrema.h>
|
| 653 |
+
* ...
|
| 654 |
+
* int data[6] = {1, 0, 2, 2, 1, 3};
|
| 655 |
+
* thrust::pair<int *, int *> result = thrust::minmax_element(data, data + 6);
|
| 656 |
+
*
|
| 657 |
+
* // result.first is data + 1
|
| 658 |
+
* // result.second is data + 5
|
| 659 |
+
* // *result.first is 0
|
| 660 |
+
* // *result.second is 3
|
| 661 |
+
* \endcode
|
| 662 |
+
*
|
| 663 |
+
* \see min_element
|
| 664 |
+
* \see max_element
|
| 665 |
+
* \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf
|
| 666 |
+
*/
|
| 667 |
+
template <typename ForwardIterator>
|
| 668 |
+
thrust::pair<ForwardIterator,ForwardIterator> minmax_element(ForwardIterator first,
|
| 669 |
+
ForwardIterator last);
|
| 670 |
+
|
| 671 |
+
|
| 672 |
+
/*! \p minmax_element finds the smallest and largest elements in the range <tt>[first, last)</tt>.
|
| 673 |
+
* It returns a pair of iterators <tt>(imin, imax)</tt> where \c imin is the same iterator
|
| 674 |
+
* returned by \p min_element and \c imax is the same iterator returned by \p max_element.
|
| 675 |
+
* This function is potentially more efficient than separate calls to \p min_element and \p max_element.
|
| 676 |
+
*
|
| 677 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 678 |
+
*
|
| 679 |
+
* \param exec The execution policy to use for parallelization.
|
| 680 |
+
* \param first The beginning of the sequence.
|
| 681 |
+
* \param last The end of the sequence.
|
| 682 |
+
* \param comp A binary predicate used for comparison.
|
| 683 |
+
* \return A pair of iterator pointing to the smallest and largest elements of the range <tt>[first, last)</tt>,
|
| 684 |
+
* if it is not an empty range; \p last, otherwise.
|
| 685 |
+
*
|
| 686 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 687 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 688 |
+
* and \p ForwardIterator's \c value_type is convertible to both \p comp's
|
| 689 |
+
* \c first_argument_type and \c second_argument_type.
|
| 690 |
+
* \tparam BinaryPredicate is a model of <a href="https://en.cppreference.com/w/cpp/named_req/BinaryPredicate">Binary Predicate</a>.
|
| 691 |
+
*
|
| 692 |
+
* The following code snippet demonstrates how to use \p minmax_element to find the smallest and largest elements
|
| 693 |
+
* of a collection of key-value pairs using the \p thrust::host execution policy for parallelization:
|
| 694 |
+
*
|
| 695 |
+
* \code
|
| 696 |
+
* #include <thrust/extrema.h>
|
| 697 |
+
* #include <thrust/pair.h>
|
| 698 |
+
* #include <thrust/execution_policy.h>
|
| 699 |
+
* ...
|
| 700 |
+
*
|
| 701 |
+
* struct key_value
|
| 702 |
+
* {
|
| 703 |
+
* int key;
|
| 704 |
+
* int value;
|
| 705 |
+
* };
|
| 706 |
+
*
|
| 707 |
+
* struct compare_key_value
|
| 708 |
+
* {
|
| 709 |
+
* __host__ __device__
|
| 710 |
+
* bool operator()(key_value lhs, key_value rhs)
|
| 711 |
+
* {
|
| 712 |
+
* return lhs.key < rhs.key;
|
| 713 |
+
* }
|
| 714 |
+
* };
|
| 715 |
+
*
|
| 716 |
+
* ...
|
| 717 |
+
* key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
|
| 718 |
+
*
|
| 719 |
+
* thrust::pair<key_value*,key_value*> extrema = thrust::minmax_element(thrust::host, data, data + 4, compare_key_value());
|
| 720 |
+
*
|
| 721 |
+
* // extrema.first == data + 1
|
| 722 |
+
* // *extrema.first == {0,7}
|
| 723 |
+
* // extrema.second == data + 3
|
| 724 |
+
* // *extrema.second == {6,1}
|
| 725 |
+
* \endcode
|
| 726 |
+
*
|
| 727 |
+
* \see min_element
|
| 728 |
+
* \see max_element
|
| 729 |
+
* \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf
|
| 730 |
+
*/
|
| 731 |
+
template<typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
|
| 732 |
+
__host__ __device__
|
| 733 |
+
thrust::pair<ForwardIterator,ForwardIterator> minmax_element(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, ForwardIterator first, ForwardIterator last, BinaryPredicate comp);
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
/*! \p minmax_element finds the smallest and largest elements in the range <tt>[first, last)</tt>.
|
| 737 |
+
* It returns a pair of iterators <tt>(imin, imax)</tt> where \c imin is the same iterator
|
| 738 |
+
* returned by \p min_element and \c imax is the same iterator returned by \p max_element.
|
| 739 |
+
* This function is potentially more efficient than separate calls to \p min_element and \p max_element.
|
| 740 |
+
*
|
| 741 |
+
* \param first The beginning of the sequence.
|
| 742 |
+
* \param last The end of the sequence.
|
| 743 |
+
* \param comp A binary predicate used for comparison.
|
| 744 |
+
* \return A pair of iterator pointing to the smallest and largest elements of the range <tt>[first, last)</tt>,
|
| 745 |
+
* if it is not an empty range; \p last, otherwise.
|
| 746 |
+
*
|
| 747 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 748 |
+
* and \p ForwardIterator's \c value_type is convertible to both \p comp's
|
| 749 |
+
* \c first_argument_type and \c second_argument_type.
|
| 750 |
+
* \tparam BinaryPredicate is a model of <a href="https://en.cppreference.com/w/cpp/named_req/BinaryPredicate">Binary Predicate</a>.
|
| 751 |
+
*
|
| 752 |
+
* The following code snippet demonstrates how to use \p minmax_element to find the smallest and largest elements
|
| 753 |
+
* of a collection of key-value pairs.
|
| 754 |
+
*
|
| 755 |
+
* \code
|
| 756 |
+
* #include <thrust/extrema.h>
|
| 757 |
+
* #include <thrust/pair.h>
|
| 758 |
+
*
|
| 759 |
+
* struct key_value
|
| 760 |
+
* {
|
| 761 |
+
* int key;
|
| 762 |
+
* int value;
|
| 763 |
+
* };
|
| 764 |
+
*
|
| 765 |
+
* struct compare_key_value
|
| 766 |
+
* {
|
| 767 |
+
* __host__ __device__
|
| 768 |
+
* bool operator()(key_value lhs, key_value rhs)
|
| 769 |
+
* {
|
| 770 |
+
* return lhs.key < rhs.key;
|
| 771 |
+
* }
|
| 772 |
+
* };
|
| 773 |
+
*
|
| 774 |
+
* ...
|
| 775 |
+
* key_value data[4] = { {4,5}, {0,7}, {2,3}, {6,1} };
|
| 776 |
+
*
|
| 777 |
+
* thrust::pair<key_value*,key_value*> extrema = thrust::minmax_element(data, data + 4, compare_key_value());
|
| 778 |
+
*
|
| 779 |
+
* // extrema.first == data + 1
|
| 780 |
+
* // *extrema.first == {0,7}
|
| 781 |
+
* // extrema.second == data + 3
|
| 782 |
+
* // *extrema.second == {6,1}
|
| 783 |
+
* \endcode
|
| 784 |
+
*
|
| 785 |
+
* \see min_element
|
| 786 |
+
* \see max_element
|
| 787 |
+
* \see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1840.pdf
|
| 788 |
+
*/
|
| 789 |
+
template <typename ForwardIterator, typename BinaryPredicate>
|
| 790 |
+
thrust::pair<ForwardIterator,ForwardIterator> minmax_element(ForwardIterator first,
|
| 791 |
+
ForwardIterator last,
|
| 792 |
+
BinaryPredicate comp);
|
| 793 |
+
|
| 794 |
+
/*! \} // end extrema
|
| 795 |
+
* \} // end reductions
|
| 796 |
+
*/
|
| 797 |
+
|
| 798 |
+
THRUST_NAMESPACE_END
|
| 799 |
+
|
| 800 |
+
#include <thrust/detail/extrema.inl>
|
| 801 |
+
#include <thrust/detail/minmax.h>
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/fill.h
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
/*! \file fill.h
|
| 19 |
+
* \brief Fills a range with a constant value
|
| 20 |
+
*/
|
| 21 |
+
|
| 22 |
+
#pragma once
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/config.h>
|
| 25 |
+
#include <thrust/detail/execution_policy.h>
|
| 26 |
+
|
| 27 |
+
THRUST_NAMESPACE_BEGIN
|
| 28 |
+
|
| 29 |
+
/*! \addtogroup transformations
|
| 30 |
+
* \addtogroup filling
|
| 31 |
+
* \ingroup transformations
|
| 32 |
+
* \{
|
| 33 |
+
*/
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
/*! \p fill assigns the value \p value to every element in
|
| 37 |
+
* the range <tt>[first, last)</tt>. That is, for every
|
| 38 |
+
* iterator \c i in <tt>[first, last)</tt>, it performs
|
| 39 |
+
* the assignment <tt>*i = value</tt>.
|
| 40 |
+
*
|
| 41 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 42 |
+
*
|
| 43 |
+
* \param exec The execution policy to use for parallelization.
|
| 44 |
+
* \param first The beginning of the sequence.
|
| 45 |
+
* \param last The end of the sequence.
|
| 46 |
+
* \param value The value to be copied.
|
| 47 |
+
*
|
| 48 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 49 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 50 |
+
* and \p ForwardIterator is mutable.
|
| 51 |
+
* \tparam T is a model of <a href="https://en.cppreference.com/w/cpp/named_req/CopyAssignable">Assignable</a>,
|
| 52 |
+
* and \p T's \c value_type is convertible to \p ForwardIterator's \c value_type.
|
| 53 |
+
*
|
| 54 |
+
* The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's
|
| 55 |
+
* elements to a given value using the \p thrust::device execution policy for parallelization:
|
| 56 |
+
*
|
| 57 |
+
* \code
|
| 58 |
+
* #include <thrust/fill.h>
|
| 59 |
+
* #include <thrust/device_vector.h>
|
| 60 |
+
* #include <thrust/execution_policy.h>
|
| 61 |
+
* ...
|
| 62 |
+
* thrust::device_vector<int> v(4);
|
| 63 |
+
* thrust::fill(thrust::device, v.begin(), v.end(), 137);
|
| 64 |
+
*
|
| 65 |
+
* // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137
|
| 66 |
+
* \endcode
|
| 67 |
+
*
|
| 68 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/fill
|
| 69 |
+
* \see \c fill_n
|
| 70 |
+
* \see \c uninitialized_fill
|
| 71 |
+
*/
|
| 72 |
+
template<typename DerivedPolicy, typename ForwardIterator, typename T>
|
| 73 |
+
__host__ __device__
|
| 74 |
+
void fill(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 75 |
+
ForwardIterator first,
|
| 76 |
+
ForwardIterator last,
|
| 77 |
+
const T &value);
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
/*! \p fill assigns the value \p value to every element in
|
| 81 |
+
* the range <tt>[first, last)</tt>. That is, for every
|
| 82 |
+
* iterator \c i in <tt>[first, last)</tt>, it performs
|
| 83 |
+
* the assignment <tt>*i = value</tt>.
|
| 84 |
+
*
|
| 85 |
+
* \param first The beginning of the sequence.
|
| 86 |
+
* \param last The end of the sequence.
|
| 87 |
+
* \param value The value to be copied.
|
| 88 |
+
*
|
| 89 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 90 |
+
* and \p ForwardIterator is mutable.
|
| 91 |
+
* \tparam T is a model of <a href="https://en.cppreference.com/w/cpp/named_req/CopyAssignable">Assignable</a>,
|
| 92 |
+
* and \p T's \c value_type is convertible to \p ForwardIterator's \c value_type.
|
| 93 |
+
*
|
| 94 |
+
* The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's
|
| 95 |
+
* elements to a given value.
|
| 96 |
+
*
|
| 97 |
+
* \code
|
| 98 |
+
* #include <thrust/fill.h>
|
| 99 |
+
* #include <thrust/device_vector.h>
|
| 100 |
+
* ...
|
| 101 |
+
* thrust::device_vector<int> v(4);
|
| 102 |
+
* thrust::fill(v.begin(), v.end(), 137);
|
| 103 |
+
*
|
| 104 |
+
* // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137
|
| 105 |
+
* \endcode
|
| 106 |
+
*
|
| 107 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/fill
|
| 108 |
+
* \see \c fill_n
|
| 109 |
+
* \see \c uninitialized_fill
|
| 110 |
+
*/
|
| 111 |
+
template<typename ForwardIterator, typename T>
|
| 112 |
+
__host__ __device__
|
| 113 |
+
void fill(ForwardIterator first,
|
| 114 |
+
ForwardIterator last,
|
| 115 |
+
const T &value);
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
/*! \p fill_n assigns the value \p value to every element in
|
| 119 |
+
* the range <tt>[first, first+n)</tt>. That is, for every
|
| 120 |
+
* iterator \c i in <tt>[first, first+n)</tt>, it performs
|
| 121 |
+
* the assignment <tt>*i = value</tt>.
|
| 122 |
+
*
|
| 123 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 124 |
+
*
|
| 125 |
+
* \param exec The execution policy to use for parallelization.
|
| 126 |
+
* \param first The beginning of the sequence.
|
| 127 |
+
* \param n The size of the sequence.
|
| 128 |
+
* \param value The value to be copied.
|
| 129 |
+
* \return <tt>first + n</tt>
|
| 130 |
+
*
|
| 131 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 132 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 133 |
+
* \tparam T is a model of <a href="https://en.cppreference.com/w/cpp/named_req/CopyAssignable">Assignable</a>,
|
| 134 |
+
* and \p T's \c value_type is convertible to a type in \p OutputIterator's set of \c value_type.
|
| 135 |
+
*
|
| 136 |
+
* The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's
|
| 137 |
+
* elements to a given value using the \p thrust::device execution policy for parallelization:
|
| 138 |
+
*
|
| 139 |
+
* \code
|
| 140 |
+
* #include <thrust/fill.h>
|
| 141 |
+
* #include <thrust/device_vector.h>
|
| 142 |
+
* #include <thrust/execution_policy.h>
|
| 143 |
+
* ...
|
| 144 |
+
* thrust::device_vector<int> v(4);
|
| 145 |
+
* thrust::fill_n(thrust::device, v.begin(), v.size(), 137);
|
| 146 |
+
*
|
| 147 |
+
* // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137
|
| 148 |
+
* \endcode
|
| 149 |
+
*
|
| 150 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/fill_n
|
| 151 |
+
* \see \c fill
|
| 152 |
+
* \see \c uninitialized_fill_n
|
| 153 |
+
*/
|
| 154 |
+
template<typename DerivedPolicy, typename OutputIterator, typename Size, typename T>
|
| 155 |
+
__host__ __device__
|
| 156 |
+
OutputIterator fill_n(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 157 |
+
OutputIterator first,
|
| 158 |
+
Size n,
|
| 159 |
+
const T &value);
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
/*! \p fill_n assigns the value \p value to every element in
|
| 163 |
+
* the range <tt>[first, first+n)</tt>. That is, for every
|
| 164 |
+
* iterator \c i in <tt>[first, first+n)</tt>, it performs
|
| 165 |
+
* the assignment <tt>*i = value</tt>.
|
| 166 |
+
*
|
| 167 |
+
* \param first The beginning of the sequence.
|
| 168 |
+
* \param n The size of the sequence.
|
| 169 |
+
* \param value The value to be copied.
|
| 170 |
+
* \return <tt>first + n</tt>
|
| 171 |
+
*
|
| 172 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 173 |
+
* \tparam T is a model of <a href="https://en.cppreference.com/w/cpp/named_req/CopyAssignable">Assignable</a>,
|
| 174 |
+
* and \p T's \c value_type is convertible to a type in \p OutputIterator's set of \c value_type.
|
| 175 |
+
*
|
| 176 |
+
* The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's
|
| 177 |
+
* elements to a given value.
|
| 178 |
+
*
|
| 179 |
+
* \code
|
| 180 |
+
* #include <thrust/fill.h>
|
| 181 |
+
* #include <thrust/device_vector.h>
|
| 182 |
+
* ...
|
| 183 |
+
* thrust::device_vector<int> v(4);
|
| 184 |
+
* thrust::fill_n(v.begin(), v.size(), 137);
|
| 185 |
+
*
|
| 186 |
+
* // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137
|
| 187 |
+
* \endcode
|
| 188 |
+
*
|
| 189 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/fill_n
|
| 190 |
+
* \see \c fill
|
| 191 |
+
* \see \c uninitialized_fill_n
|
| 192 |
+
*/
|
| 193 |
+
template<typename OutputIterator, typename Size, typename T>
|
| 194 |
+
__host__ __device__
|
| 195 |
+
OutputIterator fill_n(OutputIterator first,
|
| 196 |
+
Size n,
|
| 197 |
+
const T &value);
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
/*! \} // end filling
|
| 201 |
+
* \} // transformations
|
| 202 |
+
*/
|
| 203 |
+
|
| 204 |
+
THRUST_NAMESPACE_END
|
| 205 |
+
|
| 206 |
+
#include <thrust/detail/fill.inl>
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/for_each.h
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
* See the License for the specific language governing permissions and
|
| 13 |
+
* limitations under the License.
|
| 14 |
+
*/
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
/*! \file thrust/for_each.h
|
| 18 |
+
* \brief Applies a function to each element in a range
|
| 19 |
+
*/
|
| 20 |
+
|
| 21 |
+
#pragma once
|
| 22 |
+
|
| 23 |
+
#include <thrust/detail/config.h>
|
| 24 |
+
#include <thrust/detail/type_traits.h>
|
| 25 |
+
#include <thrust/detail/execution_policy.h>
|
| 26 |
+
|
| 27 |
+
THRUST_NAMESPACE_BEGIN
|
| 28 |
+
|
| 29 |
+
/*! \addtogroup modifying
|
| 30 |
+
* \ingroup transformations
|
| 31 |
+
* \{
|
| 32 |
+
*/
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
/*! \p for_each applies the function object \p f to each element
|
| 36 |
+
* in the range <tt>[first, last)</tt>; \p f's return value, if any,
|
| 37 |
+
* is ignored. Unlike the C++ Standard Template Library function
|
| 38 |
+
* <tt>std::for_each</tt>, this version offers no guarantee on
|
| 39 |
+
* order of execution. For this reason, this version of \p for_each
|
| 40 |
+
* does not return a copy of the function object.
|
| 41 |
+
*
|
| 42 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 43 |
+
*
|
| 44 |
+
* \param exec The execution policy to use for parallelization.
|
| 45 |
+
* \param first The beginning of the sequence.
|
| 46 |
+
* \param last The end of the sequence.
|
| 47 |
+
* \param f The function object to apply to the range <tt>[first, last)</tt>.
|
| 48 |
+
* \return last
|
| 49 |
+
*
|
| 50 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 51 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/named_req/InputIterator">Input Iterator</a>,
|
| 52 |
+
* and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type.
|
| 53 |
+
* \tparam UnaryFunction is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional/unary_function">Unary Function</a>,
|
| 54 |
+
* and \p UnaryFunction does not apply any non-constant operation through its argument.
|
| 55 |
+
*
|
| 56 |
+
* The following code snippet demonstrates how to use \p for_each to print the elements
|
| 57 |
+
* of a \p thrust::device_vector using the \p thrust::device parallelization policy:
|
| 58 |
+
*
|
| 59 |
+
* \code
|
| 60 |
+
* #include <thrust/for_each.h>
|
| 61 |
+
* #include <thrust/device_vector.h>
|
| 62 |
+
* #include <thrust/execution_policy.h>
|
| 63 |
+
* #include <cstdio>
|
| 64 |
+
* ...
|
| 65 |
+
*
|
| 66 |
+
* struct printf_functor
|
| 67 |
+
* {
|
| 68 |
+
* __host__ __device__
|
| 69 |
+
* void operator()(int x)
|
| 70 |
+
* {
|
| 71 |
+
* // note that using printf in a __device__ function requires
|
| 72 |
+
* // code compiled for a GPU with compute capability 2.0 or
|
| 73 |
+
* // higher (nvcc --arch=sm_20)
|
| 74 |
+
* printf("%d\n", x);
|
| 75 |
+
* }
|
| 76 |
+
* };
|
| 77 |
+
* ...
|
| 78 |
+
* thrust::device_vector<int> d_vec(3);
|
| 79 |
+
* d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2;
|
| 80 |
+
*
|
| 81 |
+
* thrust::for_each(thrust::device, d_vec.begin(), d_vec.end(), printf_functor());
|
| 82 |
+
*
|
| 83 |
+
* // 0 1 2 is printed to standard output in some unspecified order
|
| 84 |
+
* \endcode
|
| 85 |
+
*
|
| 86 |
+
* \see for_each_n
|
| 87 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/for_each
|
| 88 |
+
*/
|
| 89 |
+
template<typename DerivedPolicy,
|
| 90 |
+
typename InputIterator,
|
| 91 |
+
typename UnaryFunction>
|
| 92 |
+
__host__ __device__
|
| 93 |
+
InputIterator for_each(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 94 |
+
InputIterator first,
|
| 95 |
+
InputIterator last,
|
| 96 |
+
UnaryFunction f);
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
/*! \p for_each_n applies the function object \p f to each element
|
| 100 |
+
* in the range <tt>[first, first + n)</tt>; \p f's return value, if any,
|
| 101 |
+
* is ignored. Unlike the C++ Standard Template Library function
|
| 102 |
+
* <tt>std::for_each</tt>, this version offers no guarantee on
|
| 103 |
+
* order of execution.
|
| 104 |
+
*
|
| 105 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 106 |
+
*
|
| 107 |
+
* \param exec The execution policy to use for parallelization.
|
| 108 |
+
* \param first The beginning of the sequence.
|
| 109 |
+
* \param n The size of the input sequence.
|
| 110 |
+
* \param f The function object to apply to the range <tt>[first, first + n)</tt>.
|
| 111 |
+
* \return <tt>first + n</tt>
|
| 112 |
+
*
|
| 113 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 114 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/named_req/InputIterator">Input Iterator</a>,
|
| 115 |
+
* and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type.
|
| 116 |
+
* \tparam Size is an integral type.
|
| 117 |
+
* \tparam UnaryFunction is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional/unary_function">Unary Function</a>,
|
| 118 |
+
* and \p UnaryFunction does not apply any non-constant operation through its argument.
|
| 119 |
+
*
|
| 120 |
+
* The following code snippet demonstrates how to use \p for_each_n to print the elements
|
| 121 |
+
* of a \p device_vector using the \p thrust::device parallelization policy.
|
| 122 |
+
*
|
| 123 |
+
* \code
|
| 124 |
+
* #include <thrust/for_each.h>
|
| 125 |
+
* #include <thrust/device_vector.h>
|
| 126 |
+
* #include <thrust/execution_policy.h>
|
| 127 |
+
* #include <cstdio>
|
| 128 |
+
*
|
| 129 |
+
* struct printf_functor
|
| 130 |
+
* {
|
| 131 |
+
* __host__ __device__
|
| 132 |
+
* void operator()(int x)
|
| 133 |
+
* {
|
| 134 |
+
* // note that using printf in a __device__ function requires
|
| 135 |
+
* // code compiled for a GPU with compute capability 2.0 or
|
| 136 |
+
* // higher (nvcc --arch=sm_20)
|
| 137 |
+
* printf("%d\n", x);
|
| 138 |
+
* }
|
| 139 |
+
* };
|
| 140 |
+
* ...
|
| 141 |
+
* thrust::device_vector<int> d_vec(3);
|
| 142 |
+
* d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2;
|
| 143 |
+
*
|
| 144 |
+
* thrust::for_each_n(thrust::device, d_vec.begin(), d_vec.size(), printf_functor());
|
| 145 |
+
*
|
| 146 |
+
* // 0 1 2 is printed to standard output in some unspecified order
|
| 147 |
+
* \endcode
|
| 148 |
+
*
|
| 149 |
+
* \see for_each
|
| 150 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/for_each
|
| 151 |
+
*/
|
| 152 |
+
template<typename DerivedPolicy,
|
| 153 |
+
typename InputIterator,
|
| 154 |
+
typename Size,
|
| 155 |
+
typename UnaryFunction>
|
| 156 |
+
__host__ __device__
|
| 157 |
+
InputIterator for_each_n(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 158 |
+
InputIterator first,
|
| 159 |
+
Size n,
|
| 160 |
+
UnaryFunction f);
|
| 161 |
+
|
| 162 |
+
/*! \p for_each applies the function object \p f to each element
|
| 163 |
+
* in the range <tt>[first, last)</tt>; \p f's return value, if any,
|
| 164 |
+
* is ignored. Unlike the C++ Standard Template Library function
|
| 165 |
+
* <tt>std::for_each</tt>, this version offers no guarantee on
|
| 166 |
+
* order of execution. For this reason, this version of \p for_each
|
| 167 |
+
* does not return a copy of the function object.
|
| 168 |
+
*
|
| 169 |
+
* \param first The beginning of the sequence.
|
| 170 |
+
* \param last The end of the sequence.
|
| 171 |
+
* \param f The function object to apply to the range <tt>[first, last)</tt>.
|
| 172 |
+
* \return last
|
| 173 |
+
*
|
| 174 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/named_req/InputIterator">Input Iterator</a>,
|
| 175 |
+
* and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type.
|
| 176 |
+
* \tparam UnaryFunction is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional/unary_function">Unary Function</a>,
|
| 177 |
+
* and \p UnaryFunction does not apply any non-constant operation through its argument.
|
| 178 |
+
*
|
| 179 |
+
* The following code snippet demonstrates how to use \p for_each to print the elements
|
| 180 |
+
* of a \p device_vector.
|
| 181 |
+
*
|
| 182 |
+
* \code
|
| 183 |
+
* #include <thrust/for_each.h>
|
| 184 |
+
* #include <thrust/device_vector.h>
|
| 185 |
+
* #include <stdio.h>
|
| 186 |
+
*
|
| 187 |
+
* struct printf_functor
|
| 188 |
+
* {
|
| 189 |
+
* __host__ __device__
|
| 190 |
+
* void operator()(int x)
|
| 191 |
+
* {
|
| 192 |
+
* // note that using printf in a __device__ function requires
|
| 193 |
+
* // code compiled for a GPU with compute capability 2.0 or
|
| 194 |
+
* // higher (nvcc --arch=sm_20)
|
| 195 |
+
* printf("%d\n", x);
|
| 196 |
+
* }
|
| 197 |
+
* };
|
| 198 |
+
* ...
|
| 199 |
+
* thrust::device_vector<int> d_vec(3);
|
| 200 |
+
* d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2;
|
| 201 |
+
*
|
| 202 |
+
* thrust::for_each(d_vec.begin(), d_vec.end(), printf_functor());
|
| 203 |
+
*
|
| 204 |
+
* // 0 1 2 is printed to standard output in some unspecified order
|
| 205 |
+
* \endcode
|
| 206 |
+
*
|
| 207 |
+
* \see for_each_n
|
| 208 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/for_each
|
| 209 |
+
*/
|
| 210 |
+
template<typename InputIterator,
|
| 211 |
+
typename UnaryFunction>
|
| 212 |
+
InputIterator for_each(InputIterator first,
|
| 213 |
+
InputIterator last,
|
| 214 |
+
UnaryFunction f);
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
/*! \p for_each_n applies the function object \p f to each element
|
| 218 |
+
* in the range <tt>[first, first + n)</tt>; \p f's return value, if any,
|
| 219 |
+
* is ignored. Unlike the C++ Standard Template Library function
|
| 220 |
+
* <tt>std::for_each</tt>, this version offers no guarantee on
|
| 221 |
+
* order of execution.
|
| 222 |
+
*
|
| 223 |
+
* \param first The beginning of the sequence.
|
| 224 |
+
* \param n The size of the input sequence.
|
| 225 |
+
* \param f The function object to apply to the range <tt>[first, first + n)</tt>.
|
| 226 |
+
* \return <tt>first + n</tt>
|
| 227 |
+
*
|
| 228 |
+
* \tparam InputIterator is a model of <a href="https://en.cppreference.com/w/cpp/named_req/InputIterator">Input Iterator</a>,
|
| 229 |
+
* and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type.
|
| 230 |
+
* \tparam Size is an integral type.
|
| 231 |
+
* \tparam UnaryFunction is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional/unary_function">Unary Function</a>,
|
| 232 |
+
* and \p UnaryFunction does not apply any non-constant operation through its argument.
|
| 233 |
+
*
|
| 234 |
+
* The following code snippet demonstrates how to use \p for_each_n to print the elements
|
| 235 |
+
* of a \p device_vector.
|
| 236 |
+
*
|
| 237 |
+
* \code
|
| 238 |
+
* #include <thrust/for_each.h>
|
| 239 |
+
* #include <thrust/device_vector.h>
|
| 240 |
+
* #include <stdio.h>
|
| 241 |
+
*
|
| 242 |
+
* struct printf_functor
|
| 243 |
+
* {
|
| 244 |
+
* __host__ __device__
|
| 245 |
+
* void operator()(int x)
|
| 246 |
+
* {
|
| 247 |
+
* // note that using printf in a __device__ function requires
|
| 248 |
+
* // code compiled for a GPU with compute capability 2.0 or
|
| 249 |
+
* // higher (nvcc --arch=sm_20)
|
| 250 |
+
* printf("%d\n", x);
|
| 251 |
+
* }
|
| 252 |
+
* };
|
| 253 |
+
* ...
|
| 254 |
+
* thrust::device_vector<int> d_vec(3);
|
| 255 |
+
* d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2;
|
| 256 |
+
*
|
| 257 |
+
* thrust::for_each_n(d_vec.begin(), d_vec.size(), printf_functor());
|
| 258 |
+
*
|
| 259 |
+
* // 0 1 2 is printed to standard output in some unspecified order
|
| 260 |
+
* \endcode
|
| 261 |
+
*
|
| 262 |
+
* \see for_each
|
| 263 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/for_each
|
| 264 |
+
*/
|
| 265 |
+
template<typename InputIterator,
|
| 266 |
+
typename Size,
|
| 267 |
+
typename UnaryFunction>
|
| 268 |
+
InputIterator for_each_n(InputIterator first,
|
| 269 |
+
Size n,
|
| 270 |
+
UnaryFunction f);
|
| 271 |
+
|
| 272 |
+
/*! \} // end modifying
|
| 273 |
+
*/
|
| 274 |
+
|
| 275 |
+
THRUST_NAMESPACE_END
|
| 276 |
+
|
| 277 |
+
#include <thrust/detail/for_each.inl>
|
| 278 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/future.h
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
/*! \file thrust/future.h
|
| 18 |
+
* \brief `thrust::future`, an asynchronous value type.
|
| 19 |
+
*/
|
| 20 |
+
|
| 21 |
+
#pragma once
|
| 22 |
+
|
| 23 |
+
#include <thrust/detail/config.h>
|
| 24 |
+
#include <thrust/detail/cpp14_required.h>
|
| 25 |
+
|
| 26 |
+
#if THRUST_CPP_DIALECT >= 2014
|
| 27 |
+
|
| 28 |
+
#include <thrust/execution_policy.h>
|
| 29 |
+
#include <thrust/detail/static_assert.h>
|
| 30 |
+
|
| 31 |
+
#include <utility>
|
| 32 |
+
|
| 33 |
+
/*
|
| 34 |
+
// #include the host system's pointer.h header.
|
| 35 |
+
#define __THRUST_HOST_SYSTEM_POINTER_HEADER <__THRUST_HOST_SYSTEM_ROOT/pointer.h>
|
| 36 |
+
#include __THRUST_HOST_SYSTEM_POINTER_HEADER
|
| 37 |
+
#undef __THRUST_HOST_SYSTEM_POINTER_HEADER
|
| 38 |
+
*/
|
| 39 |
+
|
| 40 |
+
// #include the device system's pointer.h header.
|
| 41 |
+
#define __THRUST_DEVICE_SYSTEM_POINTER_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/pointer.h>
|
| 42 |
+
#include __THRUST_DEVICE_SYSTEM_POINTER_HEADER
|
| 43 |
+
#undef __THRUST_DEVICE_SYSTEM_POINTER_HEADER
|
| 44 |
+
|
| 45 |
+
/*
|
| 46 |
+
// #include the host system's future.h header.
|
| 47 |
+
#define __THRUST_HOST_SYSTEM_FUTURE_HEADER <__THRUST_HOST_SYSTEM_ROOT/future.h>
|
| 48 |
+
#include __THRUST_HOST_SYSTEM_FUTURE_HEADER
|
| 49 |
+
#undef __THRUST_HOST_SYSTEM_FUTURE_HEADER
|
| 50 |
+
*/
|
| 51 |
+
|
| 52 |
+
// #include the device system's future.h header.
|
| 53 |
+
#define __THRUST_DEVICE_SYSTEM_FUTURE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/future.h>
|
| 54 |
+
#include __THRUST_DEVICE_SYSTEM_FUTURE_HEADER
|
| 55 |
+
#undef __THRUST_DEVICE_SYSTEM_FUTURE_HEADER
|
| 56 |
+
|
| 57 |
+
THRUST_NAMESPACE_BEGIN
|
| 58 |
+
|
| 59 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 60 |
+
|
| 61 |
+
// `select_unique_(future|event)_type` is a hook for choosing the
|
| 62 |
+
// `unique_eager_event`/`unique_eager_future` type for a system. `decltype` is
|
| 63 |
+
// used to determine the return type of an ADL call to
|
| 64 |
+
// `select_unique_eager_(future|event)_type(system)`; that return type should
|
| 65 |
+
// be the correct event/future type for `system`. Overloads should only be
|
| 66 |
+
// declared, not defined.
|
| 67 |
+
|
| 68 |
+
namespace unimplemented
|
| 69 |
+
{
|
| 70 |
+
|
| 71 |
+
struct no_unique_eager_event_type_found {};
|
| 72 |
+
|
| 73 |
+
inline __host__
|
| 74 |
+
no_unique_eager_event_type_found
|
| 75 |
+
unique_eager_event_type(...) noexcept;
|
| 76 |
+
|
| 77 |
+
struct no_unique_eager_future_type_found {};
|
| 78 |
+
|
| 79 |
+
template <typename T>
|
| 80 |
+
__host__
|
| 81 |
+
no_unique_eager_future_type_found
|
| 82 |
+
unique_eager_future_type(...) noexcept;
|
| 83 |
+
|
| 84 |
+
} // namespace unimplemented
|
| 85 |
+
|
| 86 |
+
namespace unique_eager_event_type_detail
|
| 87 |
+
{
|
| 88 |
+
|
| 89 |
+
using unimplemented::unique_eager_event_type;
|
| 90 |
+
|
| 91 |
+
template <typename System>
|
| 92 |
+
using select = decltype(
|
| 93 |
+
unique_eager_event_type(std::declval<System>())
|
| 94 |
+
);
|
| 95 |
+
|
| 96 |
+
} // namespace unique_eager_event_type_detail
|
| 97 |
+
|
| 98 |
+
namespace unique_eager_future_type_detail
|
| 99 |
+
{
|
| 100 |
+
|
| 101 |
+
using unimplemented::unique_eager_future_type;
|
| 102 |
+
|
| 103 |
+
template <typename System, typename T>
|
| 104 |
+
using select = decltype(
|
| 105 |
+
unique_eager_future_type<T>(std::declval<System>())
|
| 106 |
+
);
|
| 107 |
+
|
| 108 |
+
} // namespace unique_eager_future_type_detail
|
| 109 |
+
|
| 110 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 111 |
+
|
| 112 |
+
template <typename System>
|
| 113 |
+
using unique_eager_event = unique_eager_event_type_detail::select<System>;
|
| 114 |
+
|
| 115 |
+
template <typename System>
|
| 116 |
+
using event = unique_eager_event<System>;
|
| 117 |
+
|
| 118 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 119 |
+
|
| 120 |
+
template <typename System, typename T>
|
| 121 |
+
using unique_eager_future = unique_eager_future_type_detail::select<System, T>;
|
| 122 |
+
|
| 123 |
+
template <typename System, typename T>
|
| 124 |
+
using future = unique_eager_future<System, T>;
|
| 125 |
+
|
| 126 |
+
/*
|
| 127 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 128 |
+
|
| 129 |
+
using host_unique_eager_event = unique_eager_event_type_detail::select<
|
| 130 |
+
thrust::system::__THRUST_HOST_SYSTEM_NAMESPACE::tag
|
| 131 |
+
>;
|
| 132 |
+
using host_event = host_unique_eager_event;
|
| 133 |
+
|
| 134 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 135 |
+
|
| 136 |
+
template <typename T>
|
| 137 |
+
using host_unique_eager_future = unique_eager_future_type_detail::select<
|
| 138 |
+
thrust::system::__THRUST_HOST_SYSTEM_NAMESPACE::tag, T
|
| 139 |
+
>;
|
| 140 |
+
template <typename T>
|
| 141 |
+
using host_future = host_unique_eager_future<T>;
|
| 142 |
+
*/
|
| 143 |
+
|
| 144 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 145 |
+
|
| 146 |
+
using device_unique_eager_event = unique_eager_event_type_detail::select<
|
| 147 |
+
thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::tag
|
| 148 |
+
>;
|
| 149 |
+
|
| 150 |
+
using device_event = device_unique_eager_event;
|
| 151 |
+
|
| 152 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 153 |
+
|
| 154 |
+
template <typename T>
|
| 155 |
+
using device_unique_eager_future = unique_eager_future_type_detail::select<
|
| 156 |
+
thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::tag, T
|
| 157 |
+
>;
|
| 158 |
+
|
| 159 |
+
template <typename T>
|
| 160 |
+
using device_future = device_unique_eager_future<T>;
|
| 161 |
+
|
| 162 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 163 |
+
|
| 164 |
+
struct new_stream_t final {};
|
| 165 |
+
|
| 166 |
+
THRUST_INLINE_CONSTANT new_stream_t new_stream{};
|
| 167 |
+
|
| 168 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 169 |
+
|
| 170 |
+
using thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::when_all;
|
| 171 |
+
|
| 172 |
+
///////////////////////////////////////////////////////////////////////////////
|
| 173 |
+
|
| 174 |
+
THRUST_NAMESPACE_END
|
| 175 |
+
|
| 176 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/generate.h
ADDED
|
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
/*! \file generate.h
|
| 19 |
+
* \brief Fills a range with values "generated" from a function of no arguments
|
| 20 |
+
*/
|
| 21 |
+
|
| 22 |
+
#pragma once
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/config.h>
|
| 25 |
+
#include <thrust/detail/execution_policy.h>
|
| 26 |
+
|
| 27 |
+
THRUST_NAMESPACE_BEGIN
|
| 28 |
+
|
| 29 |
+
/*! \addtogroup transformations
|
| 30 |
+
* \{
|
| 31 |
+
*/
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
/*! \p generate assigns the result of invoking \p gen, a function object that takes no arguments,
|
| 35 |
+
* to each element in the range <tt>[first,last)</tt>.
|
| 36 |
+
*
|
| 37 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 38 |
+
*
|
| 39 |
+
* \param exec The execution policy to use for parallelization.
|
| 40 |
+
* \param first The first element in the range of interest.
|
| 41 |
+
* \param last The last element in the range of interest.
|
| 42 |
+
* \param gen A function argument, taking no parameters, used to generate values to assign to
|
| 43 |
+
* elements in the range <tt>[first,last)</tt>.
|
| 44 |
+
*
|
| 45 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 46 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 47 |
+
* and \p ForwardIterator is mutable.
|
| 48 |
+
* \tparam Generator is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional">Generator</a>,
|
| 49 |
+
* and \p Generator's \c result_type is convertible to \p ForwardIterator's \c value_type.
|
| 50 |
+
*
|
| 51 |
+
* The following code snippet demonstrates how to fill a \c host_vector with random numbers,
|
| 52 |
+
* using the standard C library function \c rand using the \p thrust::host execution policy for parallelization:
|
| 53 |
+
*
|
| 54 |
+
* \code
|
| 55 |
+
* #include <thrust/generate.h>
|
| 56 |
+
* #include <thrust/host_vector.h>
|
| 57 |
+
* #include <thrust/execution_policy.h>
|
| 58 |
+
* #include <cstdlib>
|
| 59 |
+
* ...
|
| 60 |
+
* thrust::host_vector<int> v(10);
|
| 61 |
+
* srand(13);
|
| 62 |
+
* thrust::generate(thrust::host, v.begin(), v.end(), rand);
|
| 63 |
+
*
|
| 64 |
+
* // the elements of v are now pseudo-random numbers
|
| 65 |
+
* \endcode
|
| 66 |
+
*
|
| 67 |
+
* \see generate_n
|
| 68 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/generate
|
| 69 |
+
*/
|
| 70 |
+
template<typename DerivedPolicy,
|
| 71 |
+
typename ForwardIterator,
|
| 72 |
+
typename Generator>
|
| 73 |
+
__host__ __device__
|
| 74 |
+
void generate(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 75 |
+
ForwardIterator first,
|
| 76 |
+
ForwardIterator last,
|
| 77 |
+
Generator gen);
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
/*! \p generate assigns the result of invoking \p gen, a function object that takes no arguments,
|
| 81 |
+
* to each element in the range <tt>[first,last)</tt>.
|
| 82 |
+
*
|
| 83 |
+
* \param first The first element in the range of interest.
|
| 84 |
+
* \param last The last element in the range of interest.
|
| 85 |
+
* \param gen A function argument, taking no parameters, used to generate values to assign to
|
| 86 |
+
* elements in the range <tt>[first,last)</tt>.
|
| 87 |
+
*
|
| 88 |
+
* \tparam ForwardIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/forward_iterator">Forward Iterator</a>,
|
| 89 |
+
* and \p ForwardIterator is mutable.
|
| 90 |
+
* \tparam Generator is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional">Generator</a>,
|
| 91 |
+
* and \p Generator's \c result_type is convertible to \p ForwardIterator's \c value_type.
|
| 92 |
+
*
|
| 93 |
+
* The following code snippet demonstrates how to fill a \c host_vector with random numbers,
|
| 94 |
+
* using the standard C library function \c rand.
|
| 95 |
+
*
|
| 96 |
+
* \code
|
| 97 |
+
* #include <thrust/generate.h>
|
| 98 |
+
* #include <thrust/host_vector.h>
|
| 99 |
+
* #include <thrust/execution_policy.h>
|
| 100 |
+
* #include <cstdlib>
|
| 101 |
+
* ...
|
| 102 |
+
* thrust::host_vector<int> v(10);
|
| 103 |
+
* srand(13);
|
| 104 |
+
* thrust::generate(v.begin(), v.end(), rand);
|
| 105 |
+
*
|
| 106 |
+
* // the elements of v are now pseudo-random numbers
|
| 107 |
+
* \endcode
|
| 108 |
+
*
|
| 109 |
+
* \see generate_n
|
| 110 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/generate
|
| 111 |
+
*/
|
| 112 |
+
template<typename ForwardIterator,
|
| 113 |
+
typename Generator>
|
| 114 |
+
void generate(ForwardIterator first,
|
| 115 |
+
ForwardIterator last,
|
| 116 |
+
Generator gen);
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
/*! \p generate_n assigns the result of invoking \p gen, a function object that takes no arguments,
|
| 120 |
+
* to each element in the range <tt>[first,first + n)</tt>. The return value is <tt>first + n</tt>.
|
| 121 |
+
*
|
| 122 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 123 |
+
*
|
| 124 |
+
* \param exec The execution policy to use for parallelization.
|
| 125 |
+
* \param first The first element in the range of interest.
|
| 126 |
+
* \param n The size of the range of interest.
|
| 127 |
+
* \param gen A function argument, taking no parameters, used to generate values to assign to
|
| 128 |
+
* elements in the range <tt>[first,first + n)</tt>.
|
| 129 |
+
*
|
| 130 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 131 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 132 |
+
* \tparam Size is an integral type (either signed or unsigned).
|
| 133 |
+
* \tparam Generator is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional">Generator</a>,
|
| 134 |
+
* and \p Generator's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types.
|
| 135 |
+
*
|
| 136 |
+
* The following code snippet demonstrates how to fill a \c host_vector with random numbers,
|
| 137 |
+
* using the standard C library function \c rand using the \p thrust::host execution policy for parallelization:
|
| 138 |
+
*
|
| 139 |
+
* \code
|
| 140 |
+
* #include <thrust/generate.h>
|
| 141 |
+
* #include <thrust/host_vector.h>
|
| 142 |
+
* #include <thrust/execution_policy.h>
|
| 143 |
+
* #include <cstdlib>
|
| 144 |
+
* ...
|
| 145 |
+
* thrust::host_vector<int> v(10);
|
| 146 |
+
* srand(13);
|
| 147 |
+
* thrust::generate_n(thrust::host, v.begin(), 10, rand);
|
| 148 |
+
*
|
| 149 |
+
* // the elements of v are now pseudo-random numbers
|
| 150 |
+
* \endcode
|
| 151 |
+
*
|
| 152 |
+
* \see generate
|
| 153 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/generate
|
| 154 |
+
*/
|
| 155 |
+
template<typename DerivedPolicy,
|
| 156 |
+
typename OutputIterator,
|
| 157 |
+
typename Size,
|
| 158 |
+
typename Generator>
|
| 159 |
+
__host__ __device__
|
| 160 |
+
OutputIterator generate_n(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 161 |
+
OutputIterator first,
|
| 162 |
+
Size n,
|
| 163 |
+
Generator gen);
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
/*! \p generate_n assigns the result of invoking \p gen, a function object that takes no arguments,
|
| 167 |
+
* to each element in the range <tt>[first,first + n)</tt>. The return value is <tt>first + n</tt>.
|
| 168 |
+
*
|
| 169 |
+
* \param first The first element in the range of interest.
|
| 170 |
+
* \param n The size of the range of interest.
|
| 171 |
+
* \param gen A function argument, taking no parameters, used to generate values to assign to
|
| 172 |
+
* elements in the range <tt>[first,first + n)</tt>.
|
| 173 |
+
*
|
| 174 |
+
* \tparam OutputIterator is a model of <a href="https://en.cppreference.com/w/cpp/iterator/output_iterator">Output Iterator</a>.
|
| 175 |
+
* \tparam Size is an integral type (either signed or unsigned).
|
| 176 |
+
* \tparam Generator is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional">Generator</a>,
|
| 177 |
+
* and \p Generator's \c result_type is convertible to a type in \p OutputIterator's set of \c value_types.
|
| 178 |
+
*
|
| 179 |
+
* The following code snippet demonstrates how to fill a \c host_vector with random numbers,
|
| 180 |
+
* using the standard C library function \c rand.
|
| 181 |
+
*
|
| 182 |
+
* \code
|
| 183 |
+
* #include <thrust/generate.h>
|
| 184 |
+
* #include <thrust/host_vector.h>
|
| 185 |
+
* #include <stdlib.h>
|
| 186 |
+
* ...
|
| 187 |
+
* thrust::host_vector<int> v(10);
|
| 188 |
+
* srand(13);
|
| 189 |
+
* thrust::generate_n(v.begin(), 10, rand);
|
| 190 |
+
*
|
| 191 |
+
* // the elements of v are now pseudo-random numbers
|
| 192 |
+
* \endcode
|
| 193 |
+
*
|
| 194 |
+
* \see generate
|
| 195 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/generate
|
| 196 |
+
*/
|
| 197 |
+
template<typename OutputIterator,
|
| 198 |
+
typename Size,
|
| 199 |
+
typename Generator>
|
| 200 |
+
OutputIterator generate_n(OutputIterator first,
|
| 201 |
+
Size n,
|
| 202 |
+
Generator gen);
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
/*! \} // end transformations
|
| 206 |
+
*/
|
| 207 |
+
|
| 208 |
+
THRUST_NAMESPACE_END
|
| 209 |
+
|
| 210 |
+
#include <thrust/detail/generate.inl>
|
| 211 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/inner_product.h
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
/*! \file inner_product.h
|
| 19 |
+
* \brief Mathematical inner product between ranges
|
| 20 |
+
*/
|
| 21 |
+
|
| 22 |
+
#pragma once
|
| 23 |
+
|
| 24 |
+
#include <thrust/detail/config.h>
|
| 25 |
+
#include <thrust/detail/execution_policy.h>
|
| 26 |
+
|
| 27 |
+
THRUST_NAMESPACE_BEGIN
|
| 28 |
+
|
| 29 |
+
/*! \addtogroup reductions
|
| 30 |
+
* \{
|
| 31 |
+
* \addtogroup transformed_reductions Transformed Reductions
|
| 32 |
+
* \ingroup reductions
|
| 33 |
+
* \{
|
| 34 |
+
*/
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
/*! \p inner_product calculates an inner product of the ranges
|
| 38 |
+
* <tt>[first1, last1)</tt> and <tt>[first2, first2 + (last1 - first1))</tt>.
|
| 39 |
+
*
|
| 40 |
+
* Specifically, this version of \p inner_product computes the sum
|
| 41 |
+
* <tt>init + (*first1 * *first2) + (*(first1+1) * *(first2+1)) + ... </tt>
|
| 42 |
+
*
|
| 43 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 44 |
+
*
|
| 45 |
+
* \param exec The execution policy to use for parallelization.
|
| 46 |
+
* \param first1 The beginning of the first sequence.
|
| 47 |
+
* \param last1 The end of the first sequence.
|
| 48 |
+
* \param first2 The beginning of the second sequence.
|
| 49 |
+
* \param init Initial value of the result.
|
| 50 |
+
* \return The inner product of sequences <tt>[first1, last1)</tt>
|
| 51 |
+
* and <tt>[first2, last2)</tt> plus \p init.
|
| 52 |
+
*
|
| 53 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 54 |
+
* \tparam InputIterator1 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 55 |
+
* \tparam InputIterator2 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 56 |
+
* \tparam OutputType is a model of <a href="https://en.cppreference.com/w/cpp/named_req/CopyAssignable">Assignable</a>,
|
| 57 |
+
* and if \c x is an object of type \p OutputType, and \c y is an object of \p InputIterator1's \c value_type,
|
| 58 |
+
* and \c z is an object of \p InputIterator2's \c value_type, then <tt>x + y * z</tt> is defined
|
| 59 |
+
* and is convertible to \p OutputType.
|
| 60 |
+
*
|
| 61 |
+
* The following code demonstrates how to use \p inner_product to
|
| 62 |
+
* compute the dot product of two vectors using the \p thrust::host execution policy for parallelization.
|
| 63 |
+
*
|
| 64 |
+
* \code
|
| 65 |
+
* #include <thrust/inner_product.h>
|
| 66 |
+
* #include <thrust/execution_policy.h>
|
| 67 |
+
* ...
|
| 68 |
+
* float vec1[3] = {1.0f, 2.0f, 5.0f};
|
| 69 |
+
* float vec2[3] = {4.0f, 1.0f, 5.0f};
|
| 70 |
+
*
|
| 71 |
+
* float result = thrust::inner_product(thrust::host, vec1, vec1 + 3, vec2, 0.0f);
|
| 72 |
+
*
|
| 73 |
+
* // result == 31.0f
|
| 74 |
+
* \endcode
|
| 75 |
+
*
|
| 76 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/inner_product
|
| 77 |
+
*/
|
| 78 |
+
template<typename DerivedPolicy,
|
| 79 |
+
typename InputIterator1,
|
| 80 |
+
typename InputIterator2,
|
| 81 |
+
typename OutputType>
|
| 82 |
+
__host__ __device__
|
| 83 |
+
OutputType inner_product(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 84 |
+
InputIterator1 first1,
|
| 85 |
+
InputIterator1 last1,
|
| 86 |
+
InputIterator2 first2,
|
| 87 |
+
OutputType init);
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
/*! \p inner_product calculates an inner product of the ranges
|
| 91 |
+
* <tt>[first1, last1)</tt> and <tt>[first2, first2 + (last1 - first1))</tt>.
|
| 92 |
+
*
|
| 93 |
+
* Specifically, this version of \p inner_product computes the sum
|
| 94 |
+
* <tt>init + (*first1 * *first2) + (*(first1+1) * *(first2+1)) + ... </tt>
|
| 95 |
+
*
|
| 96 |
+
* Unlike the C++ Standard Template Library function <tt>std::inner_product</tt>,
|
| 97 |
+
* this version offers no guarantee on order of execution.
|
| 98 |
+
*
|
| 99 |
+
* \param first1 The beginning of the first sequence.
|
| 100 |
+
* \param last1 The end of the first sequence.
|
| 101 |
+
* \param first2 The beginning of the second sequence.
|
| 102 |
+
* \param init Initial value of the result.
|
| 103 |
+
* \return The inner product of sequences <tt>[first1, last1)</tt>
|
| 104 |
+
* and <tt>[first2, last2)</tt> plus \p init.
|
| 105 |
+
*
|
| 106 |
+
* \tparam InputIterator1 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 107 |
+
* \tparam InputIterator2 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 108 |
+
* \tparam OutputType is a model of <a href="https://en.cppreference.com/w/cpp/named_req/CopyAssignable">Assignable</a>,
|
| 109 |
+
* and if \c x is an object of type \p OutputType, and \c y is an object of \p InputIterator1's \c value_type,
|
| 110 |
+
* and \c z is an object of \p InputIterator2's \c value_type, then <tt>x + y * z</tt> is defined
|
| 111 |
+
* and is convertible to \p OutputType.
|
| 112 |
+
*
|
| 113 |
+
* The following code demonstrates how to use \p inner_product to
|
| 114 |
+
* compute the dot product of two vectors.
|
| 115 |
+
*
|
| 116 |
+
* \code
|
| 117 |
+
* #include <thrust/inner_product.h>
|
| 118 |
+
* ...
|
| 119 |
+
* float vec1[3] = {1.0f, 2.0f, 5.0f};
|
| 120 |
+
* float vec2[3] = {4.0f, 1.0f, 5.0f};
|
| 121 |
+
*
|
| 122 |
+
* float result = thrust::inner_product(vec1, vec1 + 3, vec2, 0.0f);
|
| 123 |
+
*
|
| 124 |
+
* // result == 31.0f
|
| 125 |
+
* \endcode
|
| 126 |
+
*
|
| 127 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/inner_product
|
| 128 |
+
*/
|
| 129 |
+
template<typename InputIterator1, typename InputIterator2, typename OutputType>
|
| 130 |
+
OutputType inner_product(InputIterator1 first1, InputIterator1 last1,
|
| 131 |
+
InputIterator2 first2, OutputType init);
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
/*! \p inner_product calculates an inner product of the ranges
|
| 135 |
+
* <tt>[first1, last1)</tt> and <tt>[first2, first2 + (last1 - first1))</tt>.
|
| 136 |
+
*
|
| 137 |
+
* This version of \p inner_product is identical to the first, except that is uses
|
| 138 |
+
* two user-supplied function objects instead of \c operator+ and \c operator*.
|
| 139 |
+
*
|
| 140 |
+
* Specifically, this version of \p inner_product computes the sum
|
| 141 |
+
* <tt>binary_op1( init, binary_op2(*first1, *first2) ), ... </tt>
|
| 142 |
+
*
|
| 143 |
+
* The algorithm's execution is parallelized as determined by \p exec.
|
| 144 |
+
*
|
| 145 |
+
* \param exec The execution policy to use for parallelization.
|
| 146 |
+
* \param first1 The beginning of the first sequence.
|
| 147 |
+
* \param last1 The end of the first sequence.
|
| 148 |
+
* \param first2 The beginning of the second sequence.
|
| 149 |
+
* \param init Initial value of the result.
|
| 150 |
+
* \param binary_op1 Generalized addition operation.
|
| 151 |
+
* \param binary_op2 Generalized multiplication operation.
|
| 152 |
+
* \return The inner product of sequences <tt>[first1, last1)</tt> and <tt>[first2, last2)</tt>.
|
| 153 |
+
*
|
| 154 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 155 |
+
* \tparam InputIterator1 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 156 |
+
* and \p InputIterator1's \c value_type is convertible to \p BinaryFunction2's \c first_argument_type.
|
| 157 |
+
* \tparam InputIterator2 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 158 |
+
* and \p InputIterator2's \c value_type is convertible to \p BinaryFunction2's \c second_argument_type.
|
| 159 |
+
* \tparam OutputType is a model of <a href="https://en.cppreference.com/w/cpp/named_req/CopyAssignable">Assignable</a>,
|
| 160 |
+
* and \p OutputType is convertible to \p BinaryFunction1's \c first_argument_type.
|
| 161 |
+
* \tparam BinaryFunction1 is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional/binary_function">Binary Function</a>,
|
| 162 |
+
* and \p BinaryFunction1's \c return_type is convertible to \p OutputType.
|
| 163 |
+
* \tparam BinaryFunction2 is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional/binary_function">Binary Function</a>,
|
| 164 |
+
* and \p BinaryFunction2's \c return_type is convertible to \p BinaryFunction1's \c second_argument_type.
|
| 165 |
+
*
|
| 166 |
+
* \code
|
| 167 |
+
* #include <thrust/inner_product.h>
|
| 168 |
+
* #include <thrust/execution_policy.h>
|
| 169 |
+
* ...
|
| 170 |
+
* float vec1[3] = {1.0f, 2.0f, 5.0f};
|
| 171 |
+
* float vec2[3] = {4.0f, 1.0f, 5.0f};
|
| 172 |
+
*
|
| 173 |
+
* float init = 0.0f;
|
| 174 |
+
* thrust::plus<float> binary_op1;
|
| 175 |
+
* thrust::multiplies<float> binary_op2;
|
| 176 |
+
*
|
| 177 |
+
* float result = thrust::inner_product(thrust::host, vec1, vec1 + 3, vec2, init, binary_op1, binary_op2);
|
| 178 |
+
*
|
| 179 |
+
* // result == 31.0f
|
| 180 |
+
* \endcode
|
| 181 |
+
*
|
| 182 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/inner_product
|
| 183 |
+
*/
|
| 184 |
+
template<typename DerivedPolicy,
|
| 185 |
+
typename InputIterator1,
|
| 186 |
+
typename InputIterator2,
|
| 187 |
+
typename OutputType,
|
| 188 |
+
typename BinaryFunction1,
|
| 189 |
+
typename BinaryFunction2>
|
| 190 |
+
__host__ __device__
|
| 191 |
+
OutputType inner_product(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
|
| 192 |
+
InputIterator1 first1,
|
| 193 |
+
InputIterator1 last1,
|
| 194 |
+
InputIterator2 first2,
|
| 195 |
+
OutputType init,
|
| 196 |
+
BinaryFunction1 binary_op1,
|
| 197 |
+
BinaryFunction2 binary_op2);
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
/*! \p inner_product calculates an inner product of the ranges
|
| 201 |
+
* <tt>[first1, last1)</tt> and <tt>[first2, first2 + (last1 - first1))</tt>.
|
| 202 |
+
*
|
| 203 |
+
* This version of \p inner_product is identical to the first, except that is uses
|
| 204 |
+
* two user-supplied function objects instead of \c operator+ and \c operator*.
|
| 205 |
+
*
|
| 206 |
+
* Specifically, this version of \p inner_product computes the sum
|
| 207 |
+
* <tt>binary_op1( init, binary_op2(*first1, *first2) ), ... </tt>
|
| 208 |
+
*
|
| 209 |
+
* Unlike the C++ Standard Template Library function <tt>std::inner_product</tt>,
|
| 210 |
+
* this version offers no guarantee on order of execution.
|
| 211 |
+
*
|
| 212 |
+
* \param first1 The beginning of the first sequence.
|
| 213 |
+
* \param last1 The end of the first sequence.
|
| 214 |
+
* \param first2 The beginning of the second sequence.
|
| 215 |
+
* \param init Initial value of the result.
|
| 216 |
+
* \param binary_op1 Generalized addition operation.
|
| 217 |
+
* \param binary_op2 Generalized multiplication operation.
|
| 218 |
+
* \return The inner product of sequences <tt>[first1, last1)</tt> and <tt>[first2, last2)</tt>.
|
| 219 |
+
*
|
| 220 |
+
* \tparam InputIterator1 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>,
|
| 221 |
+
* and \p InputIterator1's \c value_type is convertible to \p BinaryFunction2's \c first_argument_type.
|
| 222 |
+
* \tparam InputIterator2 is a model of <a href="https://en.cppreference.com/w/cpp/iterator/input_iterator">Input Iterator</a>.
|
| 223 |
+
* and \p InputIterator2's \c value_type is convertible to \p BinaryFunction2's \c second_argument_type.
|
| 224 |
+
* \tparam OutputType is a model of <a href="https://en.cppreference.com/w/cpp/named_req/CopyAssignable">Assignable</a>,
|
| 225 |
+
* and \p OutputType is convertible to \p BinaryFunction1's \c first_argument_type.
|
| 226 |
+
* \tparam BinaryFunction1 is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional/binary_function">Binary Function</a>,
|
| 227 |
+
* and \p BinaryFunction1's \c return_type is convertible to \p OutputType.
|
| 228 |
+
* \tparam BinaryFunction2 is a model of <a href="https://en.cppreference.com/w/cpp/utility/functional/binary_function">Binary Function</a>,
|
| 229 |
+
* and \p BinaryFunction2's \c return_type is convertible to \p BinaryFunction1's \c second_argument_type.
|
| 230 |
+
*
|
| 231 |
+
* \code
|
| 232 |
+
* #include <thrust/inner_product.h>
|
| 233 |
+
* ...
|
| 234 |
+
* float vec1[3] = {1.0f, 2.0f, 5.0f};
|
| 235 |
+
* float vec2[3] = {4.0f, 1.0f, 5.0f};
|
| 236 |
+
*
|
| 237 |
+
* float init = 0.0f;
|
| 238 |
+
* thrust::plus<float> binary_op1;
|
| 239 |
+
* thrust::multiplies<float> binary_op2;
|
| 240 |
+
*
|
| 241 |
+
* float result = thrust::inner_product(vec1, vec1 + 3, vec2, init, binary_op1, binary_op2);
|
| 242 |
+
*
|
| 243 |
+
* // result == 31.0f
|
| 244 |
+
* \endcode
|
| 245 |
+
*
|
| 246 |
+
* \see https://en.cppreference.com/w/cpp/algorithm/inner_product
|
| 247 |
+
*/
|
| 248 |
+
template<typename InputIterator1, typename InputIterator2, typename OutputType,
|
| 249 |
+
typename BinaryFunction1, typename BinaryFunction2>
|
| 250 |
+
OutputType inner_product(InputIterator1 first1, InputIterator1 last1,
|
| 251 |
+
InputIterator2 first2, OutputType init,
|
| 252 |
+
BinaryFunction1 binary_op1, BinaryFunction2 binary_op2);
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
/*! \} // end transformed_reductions
|
| 256 |
+
* \} // end reductions
|
| 257 |
+
*/
|
| 258 |
+
|
| 259 |
+
THRUST_NAMESPACE_END
|
| 260 |
+
|
| 261 |
+
#include <thrust/detail/inner_product.inl>
|
| 262 |
+
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/limits.h
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) 2018 NVIDIA Corporation
|
| 2 |
+
// Author: Bryce Adelstein Lelbach <brycelelbach@gmail.com>
|
| 3 |
+
//
|
| 4 |
+
// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
|
| 5 |
+
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <limits>
|
| 9 |
+
|
| 10 |
+
#include <thrust/detail/config.h>
|
| 11 |
+
#include <thrust/detail/type_traits.h>
|
| 12 |
+
|
| 13 |
+
THRUST_NAMESPACE_BEGIN
|
| 14 |
+
|
| 15 |
+
template <typename T>
|
| 16 |
+
struct numeric_limits : std::numeric_limits<T> {};
|
| 17 |
+
|
| 18 |
+
THRUST_NAMESPACE_END
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/local_config_cuda/cuda/cuda/include/thrust/memory.h
ADDED
|
@@ -0,0 +1,396 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2008-2013 NVIDIA Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
/*! \file thrust/memory.h
|
| 18 |
+
* \brief Abstractions for Thrust's memory model.
|
| 19 |
+
*/
|
| 20 |
+
|
| 21 |
+
#pragma once
|
| 22 |
+
|
| 23 |
+
#include <thrust/detail/config.h>
|
| 24 |
+
#include <thrust/detail/type_traits/pointer_traits.h>
|
| 25 |
+
#include <thrust/detail/pointer.h>
|
| 26 |
+
#include <thrust/detail/reference.h>
|
| 27 |
+
#include <thrust/detail/raw_pointer_cast.h>
|
| 28 |
+
#include <thrust/detail/raw_reference_cast.h>
|
| 29 |
+
#include <thrust/detail/malloc_and_free.h>
|
| 30 |
+
#include <thrust/detail/temporary_buffer.h>
|
| 31 |
+
|
| 32 |
+
THRUST_NAMESPACE_BEGIN
|
| 33 |
+
|
| 34 |
+
/*! \defgroup memory_management Memory Management
|
| 35 |
+
*
|
| 36 |
+
* All Thrust functionalities related to memory allocation and deallocation.
|
| 37 |
+
*
|
| 38 |
+
*/
|
| 39 |
+
|
| 40 |
+
/** \addtogroup memory_management Memory Management
|
| 41 |
+
* \{
|
| 42 |
+
*/
|
| 43 |
+
|
| 44 |
+
// define pointer for the purpose of Doxygenating it
|
| 45 |
+
// it is actually defined elsewhere
|
| 46 |
+
#if 0
|
| 47 |
+
/*! \p pointer stores a pointer to an object allocated in memory. Like \p device_ptr, this
|
| 48 |
+
* type ensures type safety when dispatching standard algorithms on ranges resident in memory.
|
| 49 |
+
*
|
| 50 |
+
* \p pointer generalizes \p device_ptr by relaxing the backend system associated with the \p pointer.
|
| 51 |
+
* Instead of the backend system specified by \p THRUST_DEVICE_SYSTEM, \p pointer's
|
| 52 |
+
* system is given by its second template parameter, \p Tag. For the purpose of Thrust dispatch,
|
| 53 |
+
* <tt>device_ptr<Element></tt> and <tt>pointer<Element,device_system_tag></tt> are considered equivalent.
|
| 54 |
+
*
|
| 55 |
+
* The raw pointer encapsulated by a \p pointer may be obtained through its <tt>get</tt> member function
|
| 56 |
+
* or the \p raw_pointer_cast free function.
|
| 57 |
+
*
|
| 58 |
+
* \tparam Element specifies the type of the pointed-to object.
|
| 59 |
+
*
|
| 60 |
+
* \tparam Tag specifies the system with which this \p pointer is associated. This may be any Thrust
|
| 61 |
+
* backend system, or a user-defined tag.
|
| 62 |
+
*
|
| 63 |
+
* \tparam Reference allows the client to specify the reference type returned upon derereference.
|
| 64 |
+
* By default, this type is <tt>reference<Element,pointer></tt>.
|
| 65 |
+
*
|
| 66 |
+
* \tparam Derived allows the client to specify the name of the derived type when \p pointer is used as
|
| 67 |
+
* a base class. This is useful to ensure that arithmetic on values of the derived type return
|
| 68 |
+
* values of the derived type as a result. By default, this type is <tt>pointer<Element,Tag,Reference></tt>.
|
| 69 |
+
*
|
| 70 |
+
* \note \p pointer is not a smart pointer; it is the client's responsibility to deallocate memory
|
| 71 |
+
* pointer to by \p pointer.
|
| 72 |
+
*
|
| 73 |
+
* \see device_ptr
|
| 74 |
+
* \see reference
|
| 75 |
+
* \see raw_pointer_cast
|
| 76 |
+
*/
|
| 77 |
+
template<typename Element, typename Tag, typename Reference = thrust::use_default, typename Derived = thrust::use_default>
|
| 78 |
+
class pointer
|
| 79 |
+
{
|
| 80 |
+
public:
|
| 81 |
+
/*! The type of the raw pointer
|
| 82 |
+
*/
|
| 83 |
+
typedef typename super_t::base_type raw_pointer;
|
| 84 |
+
|
| 85 |
+
/*! \p pointer's default constructor initializes its encapsulated pointer to \c 0
|
| 86 |
+
*/
|
| 87 |
+
__host__ __device__
|
| 88 |
+
pointer();
|
| 89 |
+
|
| 90 |
+
/*! This constructor allows construction of a <tt>pointer<const T, ...></tt> from a <tt>T*</tt>.
|
| 91 |
+
*
|
| 92 |
+
* \param ptr A raw pointer to copy from, presumed to point to a location in \p Tag's memory.
|
| 93 |
+
* \tparam OtherElement \p OtherElement shall be convertible to \p Element.
|
| 94 |
+
*/
|
| 95 |
+
template<typename OtherElement>
|
| 96 |
+
__host__ __device__
|
| 97 |
+
explicit pointer(OtherElement *ptr);
|
| 98 |
+
|
| 99 |
+
/*! This contructor allows initialization from another pointer-like object.
|
| 100 |
+
*
|
| 101 |
+
* \param other The \p OtherPointer to copy.
|
| 102 |
+
*
|
| 103 |
+
* \tparam OtherPointer The tag associated with \p OtherPointer shall be convertible to \p Tag,
|
| 104 |
+
* and its element type shall be convertible to \p Element.
|
| 105 |
+
*/
|
| 106 |
+
template<typename OtherPointer>
|
| 107 |
+
__host__ __device__
|
| 108 |
+
pointer(const OtherPointer &other,
|
| 109 |
+
typename thrust::detail::enable_if_pointer_is_convertible<
|
| 110 |
+
OtherPointer,
|
| 111 |
+
pointer<Element,Tag,Reference,Derived>
|
| 112 |
+
>::type * = 0);
|
| 113 |
+
|
| 114 |
+
/*! Assignment operator allows assigning from another pointer-like object whose element type
|
| 115 |
+
* is convertible to \c Element.
|
| 116 |
+
*
|
| 117 |
+
* \param other The other pointer-like object to assign from.
|
| 118 |
+
* \return <tt>*this</tt>
|
| 119 |
+
*
|
| 120 |
+
* \tparam OtherPointer The tag associated with \p OtherPointer shall be convertible to \p Tag,
|
| 121 |
+
* and its element type shall be convertible to \p Element.
|
| 122 |
+
*/
|
| 123 |
+
template<typename OtherPointer>
|
| 124 |
+
__host__ __device__
|
| 125 |
+
typename thrust::detail::enable_if_pointer_is_convertible<
|
| 126 |
+
OtherPointer,
|
| 127 |
+
pointer,
|
| 128 |
+
derived_type &
|
| 129 |
+
>::type
|
| 130 |
+
operator=(const OtherPointer &other);
|
| 131 |
+
|
| 132 |
+
/*! \p get returns this \p pointer's encapsulated raw pointer.
|
| 133 |
+
* \return This \p pointer's raw pointer.
|
| 134 |
+
*/
|
| 135 |
+
__host__ __device__
|
| 136 |
+
Element *get() const;
|
| 137 |
+
};
|
| 138 |
+
#endif
|
| 139 |
+
|
| 140 |
+
/*! This version of \p malloc allocates untyped uninitialized storage associated with a given system.
|
| 141 |
+
*
|
| 142 |
+
* \param system The Thrust system with which to associate the storage.
|
| 143 |
+
* \param n The number of bytes of storage to allocate.
|
| 144 |
+
* \return If allocation succeeds, a pointer to the allocated storage; a null pointer otherwise.
|
| 145 |
+
* The pointer must be deallocated with \p thrust::free.
|
| 146 |
+
*
|
| 147 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 148 |
+
*
|
| 149 |
+
* \pre \p DerivedPolicy must be publically derived from <tt>thrust::execution_policy<DerivedPolicy></tt>.
|
| 150 |
+
*
|
| 151 |
+
* The following code snippet demonstrates how to use \p malloc to allocate a range of memory
|
| 152 |
+
* associated with Thrust's device system.
|
| 153 |
+
*
|
| 154 |
+
* \code
|
| 155 |
+
* #include <thrust/memory.h>
|
| 156 |
+
* ...
|
| 157 |
+
* // allocate some memory with thrust::malloc
|
| 158 |
+
* const int N = 100;
|
| 159 |
+
* thrust::device_system_tag device_sys;
|
| 160 |
+
* thrust::pointer<void,thrust::device_space_tag> void_ptr = thrust::malloc(device_sys, N);
|
| 161 |
+
*
|
| 162 |
+
* // manipulate memory
|
| 163 |
+
* ...
|
| 164 |
+
*
|
| 165 |
+
* // deallocate void_ptr with thrust::free
|
| 166 |
+
* thrust::free(device_sys, void_ptr);
|
| 167 |
+
* \endcode
|
| 168 |
+
*
|
| 169 |
+
* \see free
|
| 170 |
+
* \see device_malloc
|
| 171 |
+
*/
|
| 172 |
+
template<typename DerivedPolicy>
|
| 173 |
+
__host__ __device__
|
| 174 |
+
pointer<void,DerivedPolicy> malloc(const thrust::detail::execution_policy_base<DerivedPolicy> &system, std::size_t n);
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
/*! This version of \p malloc allocates typed uninitialized storage associated with a given system.
|
| 178 |
+
*
|
| 179 |
+
* \param system The Thrust system with which to associate the storage.
|
| 180 |
+
* \param n The number of elements of type \c T which the storage should accomodate.
|
| 181 |
+
* \return If allocation succeeds, a pointer to an allocation large enough to accomodate \c n
|
| 182 |
+
* elements of type \c T; a null pointer otherwise.
|
| 183 |
+
* The pointer must be deallocated with \p thrust::free.
|
| 184 |
+
*
|
| 185 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 186 |
+
*
|
| 187 |
+
* \pre \p DerivedPolicy must be publically derived from <tt>thrust::execution_policy<DerivedPolicy></tt>.
|
| 188 |
+
*
|
| 189 |
+
* The following code snippet demonstrates how to use \p malloc to allocate a range of memory
|
| 190 |
+
* to accomodate integers associated with Thrust's device system.
|
| 191 |
+
*
|
| 192 |
+
* \code
|
| 193 |
+
* #include <thrust/memory.h>
|
| 194 |
+
* ...
|
| 195 |
+
* // allocate storage for 100 ints with thrust::malloc
|
| 196 |
+
* const int N = 100;
|
| 197 |
+
* thrust::device_system_tag device_sys;
|
| 198 |
+
* thrust::pointer<int,thrust::device_system_tag> ptr = thrust::malloc<int>(device_sys, N);
|
| 199 |
+
*
|
| 200 |
+
* // manipulate memory
|
| 201 |
+
* ...
|
| 202 |
+
*
|
| 203 |
+
* // deallocate ptr with thrust::free
|
| 204 |
+
* thrust::free(device_sys, ptr);
|
| 205 |
+
* \endcode
|
| 206 |
+
*
|
| 207 |
+
* \see free
|
| 208 |
+
* \see device_malloc
|
| 209 |
+
*/
|
| 210 |
+
template<typename T, typename DerivedPolicy>
|
| 211 |
+
__host__ __device__
|
| 212 |
+
pointer<T,DerivedPolicy> malloc(const thrust::detail::execution_policy_base<DerivedPolicy> &system, std::size_t n);
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
/*! \p get_temporary_buffer returns a pointer to storage associated with a given Thrust system sufficient to store up to
|
| 216 |
+
* \p n objects of type \c T. If not enough storage is available to accomodate \p n objects, an implementation may return
|
| 217 |
+
* a smaller buffer. The number of objects the returned buffer can accomodate is also returned.
|
| 218 |
+
*
|
| 219 |
+
* Thrust uses \p get_temporary_buffer internally when allocating temporary storage required by algorithm implementations.
|
| 220 |
+
*
|
| 221 |
+
* The storage allocated with \p get_temporary_buffer must be returned to the system with \p return_temporary_buffer.
|
| 222 |
+
*
|
| 223 |
+
* \param system The Thrust system with which to associate the storage.
|
| 224 |
+
* \param n The requested number of objects of type \c T the storage should accomodate.
|
| 225 |
+
* \return A pair \c p such that <tt>p.first</tt> is a pointer to the allocated storage and <tt>p.second</tt> is the number of
|
| 226 |
+
* contiguous objects of type \c T that the storage can accomodate. If no storage can be allocated, <tt>p.first</tt> if
|
| 227 |
+
* no storage can be obtained. The storage must be returned to the system using \p return_temporary_buffer.
|
| 228 |
+
*
|
| 229 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 230 |
+
*
|
| 231 |
+
* \pre \p DerivedPolicy must be publically derived from <tt>thrust::execution_policy<DerivedPolicy></tt>.
|
| 232 |
+
*
|
| 233 |
+
* The following code snippet demonstrates how to use \p get_temporary_buffer to allocate a range of memory
|
| 234 |
+
* to accomodate integers associated with Thrust's device system.
|
| 235 |
+
*
|
| 236 |
+
* \code
|
| 237 |
+
* #include <thrust/memory.h>
|
| 238 |
+
* ...
|
| 239 |
+
* // allocate storage for 100 ints with thrust::get_temporary_buffer
|
| 240 |
+
* const int N = 100;
|
| 241 |
+
*
|
| 242 |
+
* typedef thrust::pair<
|
| 243 |
+
* thrust::pointer<int,thrust::device_system_tag>,
|
| 244 |
+
* std::ptrdiff_t
|
| 245 |
+
* > ptr_and_size_t;
|
| 246 |
+
*
|
| 247 |
+
* thrust::device_system_tag device_sys;
|
| 248 |
+
* ptr_and_size_t ptr_and_size = thrust::get_temporary_buffer<int>(device_sys, N);
|
| 249 |
+
*
|
| 250 |
+
* // manipulate up to 100 ints
|
| 251 |
+
* for(int i = 0; i < ptr_and_size.second; ++i)
|
| 252 |
+
* {
|
| 253 |
+
* *ptr_and_size.first = i;
|
| 254 |
+
* }
|
| 255 |
+
*
|
| 256 |
+
* // deallocate storage with thrust::return_temporary_buffer
|
| 257 |
+
* thrust::return_temporary_buffer(device_sys, ptr_and_size.first);
|
| 258 |
+
* \endcode
|
| 259 |
+
*
|
| 260 |
+
* \see malloc
|
| 261 |
+
* \see return_temporary_buffer
|
| 262 |
+
*/
|
| 263 |
+
template<typename T, typename DerivedPolicy>
|
| 264 |
+
__host__ __device__
|
| 265 |
+
thrust::pair<thrust::pointer<T,DerivedPolicy>, typename thrust::pointer<T,DerivedPolicy>::difference_type>
|
| 266 |
+
get_temporary_buffer(const thrust::detail::execution_policy_base<DerivedPolicy> &system, typename thrust::pointer<T,DerivedPolicy>::difference_type n);
|
| 267 |
+
|
| 268 |
+
/*! \p free deallocates the storage previously allocated by \p thrust::malloc.
|
| 269 |
+
*
|
| 270 |
+
* \param system The Thrust system with which the storage is associated.
|
| 271 |
+
* \param ptr A pointer previously returned by \p thrust::malloc. If \p ptr is null, \p free
|
| 272 |
+
* does nothing.
|
| 273 |
+
*
|
| 274 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 275 |
+
*
|
| 276 |
+
* \pre \p ptr shall have been returned by a previous call to <tt>thrust::malloc(system, n)</tt> or <tt>thrust::malloc<T>(system, n)</tt> for some type \c T.
|
| 277 |
+
*
|
| 278 |
+
* The following code snippet demonstrates how to use \p free to deallocate a range of memory
|
| 279 |
+
* previously allocated with \p thrust::malloc.
|
| 280 |
+
*
|
| 281 |
+
* \code
|
| 282 |
+
* #include <thrust/memory.h>
|
| 283 |
+
* ...
|
| 284 |
+
* // allocate storage for 100 ints with thrust::malloc
|
| 285 |
+
* const int N = 100;
|
| 286 |
+
* thrust::device_system_tag device_sys;
|
| 287 |
+
* thrust::pointer<int,thrust::device_system_tag> ptr = thrust::malloc<int>(device_sys, N);
|
| 288 |
+
*
|
| 289 |
+
* // mainpulate memory
|
| 290 |
+
* ...
|
| 291 |
+
*
|
| 292 |
+
* // deallocate ptr with thrust::free
|
| 293 |
+
* thrust::free(device_sys, ptr);
|
| 294 |
+
* \endcode
|
| 295 |
+
*/
|
| 296 |
+
template<typename DerivedPolicy, typename Pointer>
|
| 297 |
+
__host__ __device__
|
| 298 |
+
void free(const thrust::detail::execution_policy_base<DerivedPolicy> &system, Pointer ptr);
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
/*! \p return_temporary_buffer deallocates storage associated with a given Thrust system previously allocated by \p get_temporary_buffer.
|
| 302 |
+
*
|
| 303 |
+
* Thrust uses \p return_temporary_buffer internally when deallocating temporary storage required by algorithm implementations.
|
| 304 |
+
*
|
| 305 |
+
* \param system The Thrust system with which the storage is associated.
|
| 306 |
+
* \param p A pointer previously returned by \p thrust::get_temporary_buffer. If \p ptr is null, \p return_temporary_buffer does nothing.
|
| 307 |
+
*
|
| 308 |
+
* \tparam DerivedPolicy The name of the derived execution policy.
|
| 309 |
+
*
|
| 310 |
+
* \pre \p p shall have been previously allocated by \p thrust::get_temporary_buffer.
|
| 311 |
+
*
|
| 312 |
+
* The following code snippet demonstrates how to use \p return_temporary_buffer to deallocate a range of memory
|
| 313 |
+
* previously allocated by \p get_temporary_buffer.
|
| 314 |
+
*
|
| 315 |
+
* \code
|
| 316 |
+
* #include <thrust/memory.h>
|
| 317 |
+
* ...
|
| 318 |
+
* // allocate storage for 100 ints with thrust::get_temporary_buffer
|
| 319 |
+
* const int N = 100;
|
| 320 |
+
*
|
| 321 |
+
* typedef thrust::pair<
|
| 322 |
+
* thrust::pointer<int,thrust::device_system_tag>,
|
| 323 |
+
* std::ptrdiff_t
|
| 324 |
+
* > ptr_and_size_t;
|
| 325 |
+
*
|
| 326 |
+
* thrust::device_system_tag device_sys;
|
| 327 |
+
* ptr_and_size_t ptr_and_size = thrust::get_temporary_buffer<int>(device_sys, N);
|
| 328 |
+
*
|
| 329 |
+
* // manipulate up to 100 ints
|
| 330 |
+
* for(int i = 0; i < ptr_and_size.second; ++i)
|
| 331 |
+
* {
|
| 332 |
+
* *ptr_and_size.first = i;
|
| 333 |
+
* }
|
| 334 |
+
*
|
| 335 |
+
* // deallocate storage with thrust::return_temporary_buffer
|
| 336 |
+
* thrust::return_temporary_buffer(device_sys, ptr_and_size.first);
|
| 337 |
+
* \endcode
|
| 338 |
+
*
|
| 339 |
+
* \see free
|
| 340 |
+
* \see get_temporary_buffer
|
| 341 |
+
*/
|
| 342 |
+
template<typename DerivedPolicy, typename Pointer>
|
| 343 |
+
__host__ __device__
|
| 344 |
+
void return_temporary_buffer(const thrust::detail::execution_policy_base<DerivedPolicy> &system, Pointer p, std::ptrdiff_t n);
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
/*! \p raw_pointer_cast creates a "raw" pointer from a pointer-like type,
|
| 348 |
+
* simply returning the wrapped pointer, should it exist.
|
| 349 |
+
*
|
| 350 |
+
* \param ptr The pointer of interest.
|
| 351 |
+
* \return <tt>ptr.get()</tt>, if the expression is well formed; <tt>ptr</tt>, otherwise.
|
| 352 |
+
* \see raw_reference_cast
|
| 353 |
+
*/
|
| 354 |
+
template<typename Pointer>
|
| 355 |
+
__host__ __device__
|
| 356 |
+
typename thrust::detail::pointer_traits<Pointer>::raw_pointer
|
| 357 |
+
raw_pointer_cast(Pointer ptr);
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
/*! \p raw_reference_cast creates a "raw" reference from a wrapped reference type,
|
| 361 |
+
* simply returning the underlying reference, should it exist.
|
| 362 |
+
*
|
| 363 |
+
* If the argument is not a reference wrapper, the result is a reference to the argument.
|
| 364 |
+
*
|
| 365 |
+
* \param ref The reference of interest.
|
| 366 |
+
* \return <tt>*thrust::raw_pointer_cast(&ref)</tt>.
|
| 367 |
+
* \note There are two versions of \p raw_reference_cast. One for <tt>const</tt> references,
|
| 368 |
+
* and one for non-<tt>const</tt>.
|
| 369 |
+
* \see raw_pointer_cast
|
| 370 |
+
*/
|
| 371 |
+
template<typename T>
|
| 372 |
+
__host__ __device__
|
| 373 |
+
typename detail::raw_reference<T>::type
|
| 374 |
+
raw_reference_cast(T &ref);
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
/*! \p raw_reference_cast creates a "raw" reference from a wrapped reference type,
|
| 378 |
+
* simply returning the underlying reference, should it exist.
|
| 379 |
+
*
|
| 380 |
+
* If the argument is not a reference wrapper, the result is a reference to the argument.
|
| 381 |
+
*
|
| 382 |
+
* \param ref The reference of interest.
|
| 383 |
+
* \return <tt>*thrust::raw_pointer_cast(&ref)</tt>.
|
| 384 |
+
* \note There are two versions of \p raw_reference_cast. One for <tt>const</tt> references,
|
| 385 |
+
* and one for non-<tt>const</tt>.
|
| 386 |
+
* \see raw_pointer_cast
|
| 387 |
+
*/
|
| 388 |
+
template<typename T>
|
| 389 |
+
__host__ __device__
|
| 390 |
+
typename detail::raw_reference<const T>::type
|
| 391 |
+
raw_reference_cast(const T &ref);
|
| 392 |
+
|
| 393 |
+
/*! \} // memory_management
|
| 394 |
+
*/
|
| 395 |
+
|
| 396 |
+
THRUST_NAMESPACE_END
|