Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so +3 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 +3 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/common_functions.h +65 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h +1690 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h +174 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h +189 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/memcpy_async.h +62 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/reduce.h +63 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/scan.h +63 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h +348 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h +0 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h +96 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h +78 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h +282 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h +90 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h +109 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h +642 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp +1546 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_gl_interop.h +514 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h +1958 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h +0 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h +201 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h +57 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp +224 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_functions.h +65 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_types.h +81 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_defines.h +65 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_functions.h +65 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/mma.h +60 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.h +114 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h +221 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp +604 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.h +141 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp +134 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp +588 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h +543 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp +527 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.hpp +161 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_functions.h +124 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_indirect_functions.h +243 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_fetch_functions.h +223 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h +175 -0
- omnilmm/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.8 +3 -0
- wemm/lib/python3.10/site-packages/sympy/physics/__pycache__/paulialgebra.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/sympy/physics/__pycache__/sho.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/sympy/physics/__pycache__/wigner.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/sympy/physics/biomechanics/__init__.py +53 -0
- wemm/lib/python3.10/site-packages/sympy/physics/biomechanics/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/sympy/physics/biomechanics/__pycache__/_mixin.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -985,3 +985,6 @@ omnilmm/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter
|
|
| 985 |
omnilmm/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
|
| 986 |
omnilmm/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 filter=lfs diff=lfs merge=lfs -text
|
| 987 |
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/libcudart.so.12 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 985 |
omnilmm/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
|
| 986 |
omnilmm/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 filter=lfs diff=lfs merge=lfs -text
|
| 987 |
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/libcudart.so.12 filter=lfs diff=lfs merge=lfs -text
|
| 988 |
+
omnilmm/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 filter=lfs diff=lfs merge=lfs -text
|
| 989 |
+
omnilmm/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
|
| 990 |
+
omnilmm/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.8 filter=lfs diff=lfs merge=lfs -text
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:95cec42ae770c1f2251d204b03e12d56fdb2e5561e4898c07b40382fe2474589
|
| 3 |
+
size 28636664
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6c5639ce397a9f5b82cd277432d146370674358334a4ce0d33fa9a5ca090ac8a
|
| 3 |
+
size 6842248
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/common_functions.h
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 51 |
+
#if defined(_MSC_VER)
|
| 52 |
+
#pragma message("common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| 53 |
+
#else
|
| 54 |
+
#warning "common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| 55 |
+
#endif
|
| 56 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 57 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
#include "crt/common_functions.h"
|
| 61 |
+
|
| 62 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__)
|
| 63 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 64 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
|
| 65 |
+
#endif
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h
ADDED
|
@@ -0,0 +1,1690 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#ifndef _COOPERATIVE_GROUPS_H_
|
| 51 |
+
#define _COOPERATIVE_GROUPS_H_
|
| 52 |
+
|
| 53 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 54 |
+
|
| 55 |
+
#include "cooperative_groups/details/info.h"
|
| 56 |
+
#include "cooperative_groups/details/driver_abi.h"
|
| 57 |
+
#include "cooperative_groups/details/helpers.h"
|
| 58 |
+
#include "cooperative_groups/details/memory.h"
|
| 59 |
+
|
| 60 |
+
#if defined(_CG_HAS_STL_ATOMICS)
|
| 61 |
+
#include <cuda/atomic>
|
| 62 |
+
#define _CG_THREAD_SCOPE(scope) _CG_STATIC_CONST_DECL cuda::thread_scope thread_scope = scope;
|
| 63 |
+
#else
|
| 64 |
+
#define _CG_THREAD_SCOPE(scope)
|
| 65 |
+
#endif
|
| 66 |
+
|
| 67 |
+
_CG_BEGIN_NAMESPACE
|
| 68 |
+
|
| 69 |
+
namespace details {
|
| 70 |
+
_CG_CONST_DECL unsigned int coalesced_group_id = 1;
|
| 71 |
+
_CG_CONST_DECL unsigned int multi_grid_group_id = 2;
|
| 72 |
+
_CG_CONST_DECL unsigned int grid_group_id = 3;
|
| 73 |
+
_CG_CONST_DECL unsigned int thread_block_id = 4;
|
| 74 |
+
_CG_CONST_DECL unsigned int multi_tile_group_id = 5;
|
| 75 |
+
_CG_CONST_DECL unsigned int cluster_group_id = 6;
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
/**
|
| 79 |
+
* class thread_group;
|
| 80 |
+
*
|
| 81 |
+
* Generic thread group type, into which all groups are convertible.
|
| 82 |
+
* It acts as a container for all storage necessary for the derived groups,
|
| 83 |
+
* and will dispatch the API calls to the correct derived group. This means
|
| 84 |
+
* that all derived groups must implement the same interface as thread_group.
|
| 85 |
+
*/
|
| 86 |
+
class thread_group
|
| 87 |
+
{
|
| 88 |
+
protected:
|
| 89 |
+
struct group_data {
|
| 90 |
+
unsigned int _unused : 1;
|
| 91 |
+
unsigned int type : 7, : 0;
|
| 92 |
+
};
|
| 93 |
+
|
| 94 |
+
struct gg_data {
|
| 95 |
+
details::grid_workspace *gridWs;
|
| 96 |
+
};
|
| 97 |
+
|
| 98 |
+
#if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
|
| 99 |
+
struct mg_data {
|
| 100 |
+
unsigned long long _unused : 1;
|
| 101 |
+
unsigned long long type : 7;
|
| 102 |
+
unsigned long long handle : 56;
|
| 103 |
+
const details::multi_grid::multi_grid_functions *functions;
|
| 104 |
+
};
|
| 105 |
+
#endif
|
| 106 |
+
|
| 107 |
+
struct tg_data {
|
| 108 |
+
unsigned int is_tiled : 1;
|
| 109 |
+
unsigned int type : 7;
|
| 110 |
+
unsigned int size : 24;
|
| 111 |
+
// packed to 4b
|
| 112 |
+
unsigned int metaGroupSize : 16;
|
| 113 |
+
unsigned int metaGroupRank : 16;
|
| 114 |
+
// packed to 8b
|
| 115 |
+
unsigned int mask;
|
| 116 |
+
// packed to 12b
|
| 117 |
+
unsigned int _res;
|
| 118 |
+
};
|
| 119 |
+
|
| 120 |
+
friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
|
| 121 |
+
friend class thread_block;
|
| 122 |
+
|
| 123 |
+
union __align__(8) {
|
| 124 |
+
group_data group;
|
| 125 |
+
tg_data coalesced;
|
| 126 |
+
gg_data grid;
|
| 127 |
+
#if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
|
| 128 |
+
mg_data multi_grid;
|
| 129 |
+
#endif
|
| 130 |
+
} _data;
|
| 131 |
+
|
| 132 |
+
_CG_QUALIFIER thread_group operator=(const thread_group& src);
|
| 133 |
+
|
| 134 |
+
_CG_QUALIFIER thread_group(unsigned int type) {
|
| 135 |
+
_data.group.type = type;
|
| 136 |
+
_data.group._unused = false;
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
#ifdef _CG_CPP11_FEATURES
|
| 140 |
+
static_assert(sizeof(tg_data) <= 16, "Failed size check");
|
| 141 |
+
static_assert(sizeof(gg_data) <= 16, "Failed size check");
|
| 142 |
+
# ifdef _CG_ABI_EXPERIMENTAL
|
| 143 |
+
static_assert(sizeof(mg_data) <= 16, "Failed size check");
|
| 144 |
+
# endif
|
| 145 |
+
#endif
|
| 146 |
+
|
| 147 |
+
public:
|
| 148 |
+
_CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
|
| 149 |
+
|
| 150 |
+
_CG_QUALIFIER unsigned long long size() const;
|
| 151 |
+
_CG_QUALIFIER unsigned long long num_threads() const;
|
| 152 |
+
_CG_QUALIFIER unsigned long long thread_rank() const;
|
| 153 |
+
_CG_QUALIFIER void sync() const;
|
| 154 |
+
_CG_QUALIFIER unsigned int get_type() const {
|
| 155 |
+
return _data.group.type;
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
};
|
| 159 |
+
|
| 160 |
+
template <unsigned int TyId>
|
| 161 |
+
struct thread_group_base : public thread_group {
|
| 162 |
+
_CG_QUALIFIER thread_group_base() : thread_group(TyId) {}
|
| 163 |
+
_CG_STATIC_CONST_DECL unsigned int id = TyId;
|
| 164 |
+
};
|
| 165 |
+
|
| 166 |
+
#if defined(_CG_HAS_MULTI_GRID_GROUP)
|
| 167 |
+
|
| 168 |
+
/**
|
| 169 |
+
* class multi_grid_group;
|
| 170 |
+
*
|
| 171 |
+
* Threads within this this group are guaranteed to be co-resident on the
|
| 172 |
+
* same system, on multiple devices within the same launched kernels.
|
| 173 |
+
* To use this group, the kernel must have been launched with
|
| 174 |
+
* cuLaunchCooperativeKernelMultiDevice (or the CUDA Runtime equivalent),
|
| 175 |
+
* and the device must support it (queryable device attribute).
|
| 176 |
+
*
|
| 177 |
+
* Constructed via this_multi_grid();
|
| 178 |
+
*/
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
# if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
|
| 182 |
+
class multi_grid_group;
|
| 183 |
+
|
| 184 |
+
// Multi grid group requires these functions to be templated to prevent ptxas from trying to use CG syscalls
|
| 185 |
+
template <typename = void>
|
| 186 |
+
__device__ _CG_DEPRECATED multi_grid_group this_multi_grid();
|
| 187 |
+
|
| 188 |
+
class multi_grid_group : public thread_group_base<details::multi_grid_group_id>
|
| 189 |
+
{
|
| 190 |
+
private:
|
| 191 |
+
template <typename = void>
|
| 192 |
+
_CG_QUALIFIER multi_grid_group() {
|
| 193 |
+
_data.multi_grid.functions = details::multi_grid::load_grid_intrinsics();
|
| 194 |
+
_data.multi_grid.handle = _data.multi_grid.functions->get_intrinsic_handle();
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
friend multi_grid_group this_multi_grid<void>();
|
| 198 |
+
|
| 199 |
+
public:
|
| 200 |
+
_CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system)
|
| 201 |
+
|
| 202 |
+
_CG_QUALIFIER bool is_valid() const {
|
| 203 |
+
return (_data.multi_grid.handle != 0);
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
_CG_QUALIFIER void sync() const {
|
| 207 |
+
if (!is_valid()) {
|
| 208 |
+
_CG_ABORT();
|
| 209 |
+
}
|
| 210 |
+
_data.multi_grid.functions->sync(_data.multi_grid.handle);
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
_CG_QUALIFIER unsigned long long num_threads() const {
|
| 214 |
+
_CG_ASSERT(is_valid());
|
| 215 |
+
return _data.multi_grid.functions->size(_data.multi_grid.handle);
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
_CG_QUALIFIER unsigned long long size() const {
|
| 219 |
+
return num_threads();
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
_CG_QUALIFIER unsigned long long thread_rank() const {
|
| 223 |
+
_CG_ASSERT(is_valid());
|
| 224 |
+
return _data.multi_grid.functions->thread_rank(_data.multi_grid.handle);
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
_CG_QUALIFIER unsigned int grid_rank() const {
|
| 228 |
+
_CG_ASSERT(is_valid());
|
| 229 |
+
return (_data.multi_grid.functions->grid_rank(_data.multi_grid.handle));
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
_CG_QUALIFIER unsigned int num_grids() const {
|
| 233 |
+
_CG_ASSERT(is_valid());
|
| 234 |
+
return (_data.multi_grid.functions->num_grids(_data.multi_grid.handle));
|
| 235 |
+
}
|
| 236 |
+
};
|
| 237 |
+
# else
|
| 238 |
+
// Multi-grid group (legacy, non-experimental ABI): caches size and rank at
// construction time and forwards the remaining queries to details::multi_grid.
// The whole API is deprecated.
class multi_grid_group
{
private:
    // Opaque driver handle identifying this multi-grid launch (0 == invalid).
    unsigned long long _handle;
    // Cached group size, captured once in the constructor.
    unsigned int _size;
    // Cached rank of the calling thread, captured once in the constructor.
    unsigned int _rank;

    friend _CG_QUALIFIER multi_grid_group this_multi_grid();

    // Constructed only by this_multi_grid().
    _CG_QUALIFIER multi_grid_group() {
        _handle = details::multi_grid::get_intrinsic_handle();
        _size = details::multi_grid::size(_handle);
        _rank = details::multi_grid::thread_rank(_handle);
    }

public:
    _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system)

    // A zero handle means the group could not be established for this launch.
    _CG_QUALIFIER _CG_DEPRECATED bool is_valid() const {
        return (_handle != 0);
    }

    // Synchronize all threads of all participating grids; aborts on an
    // invalid handle rather than deadlocking.
    _CG_QUALIFIER _CG_DEPRECATED void sync() const {
        if (!is_valid()) {
            _CG_ABORT();
        }
        details::multi_grid::sync(_handle);
    }

    // Total number of threads across all participating grids (cached).
    _CG_QUALIFIER _CG_DEPRECATED unsigned long long num_threads() const {
        _CG_ASSERT(is_valid());
        return _size;
    }

    // Legacy alias for num_threads().
    _CG_QUALIFIER _CG_DEPRECATED unsigned long long size() const {
        return num_threads();
    }

    // Rank of the calling thread within the whole multi-grid group (cached).
    _CG_QUALIFIER _CG_DEPRECATED unsigned long long thread_rank() const {
        _CG_ASSERT(is_valid());
        return _rank;
    }

    // Rank of the calling thread's grid among the participating grids.
    _CG_QUALIFIER _CG_DEPRECATED unsigned int grid_rank() const {
        _CG_ASSERT(is_valid());
        return (details::multi_grid::grid_rank(_handle));
    }

    // Number of grids participating in this multi-grid launch.
    _CG_QUALIFIER _CG_DEPRECATED unsigned int num_grids() const {
        _CG_ASSERT(is_valid());
        return (details::multi_grid::num_grids(_handle));
    }
};
|
| 291 |
+
# endif
|
| 292 |
+
|
| 293 |
+
/**
 * multi_grid_group this_multi_grid()
 *
 * Constructs a multi_grid_group for the calling thread's launch.
 * Deprecated along with the rest of the multi-grid API.
 */
// Under the experimental ABI the factory is a (dummy) template so it can be
// befriended by the templated constructor; otherwise it is a plain function.
# if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
template <typename>
__device__
#else
_CG_QUALIFIER
# endif
_CG_DEPRECATED
multi_grid_group this_multi_grid()
{
    return multi_grid_group();
}
|
| 309 |
+
#endif
|
| 310 |
+
|
| 311 |
+
/**
|
| 312 |
+
* class grid_group;
|
| 313 |
+
*
|
| 314 |
+
* Threads within this this group are guaranteed to be co-resident on the
|
| 315 |
+
* same device within the same launched kernel. To use this group, the kernel
|
| 316 |
+
* must have been launched with cuLaunchCooperativeKernel (or the CUDA Runtime equivalent),
|
| 317 |
+
* and the device must support it (queryable device attribute).
|
| 318 |
+
*
|
| 319 |
+
* Constructed via this_grid();
|
| 320 |
+
*/
|
| 321 |
+
class grid_group : public thread_group_base<details::grid_group_id>
{
    _CG_STATIC_CONST_DECL unsigned int _group_id = details::grid_group_id;
    friend _CG_QUALIFIER grid_group this_grid();

private:
    // Constructed only by this_grid() with the driver-provided workspace;
    // the workspace holds the grid-wide barrier used by sync().
    _CG_QUALIFIER grid_group(details::grid_workspace *gridWs) {
        _data.grid.gridWs = gridWs;
    }

public:
    _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)

    // Valid only when a workspace was obtained (cooperative launch).
    _CG_QUALIFIER bool is_valid() const {
        return (_data.grid.gridWs != NULL);
    }

    // Synchronize every thread of the grid on the workspace barrier;
    // aborts on an invalid workspace rather than deadlocking.
    _CG_QUALIFIER void sync() const {
        if (!is_valid()) {
            _CG_ABORT();
        }
        details::grid::sync(&_data.grid.gridWs->barrier);
    }

    // Total number of threads in the grid (legacy name; see num_threads()).
    _CG_STATIC_QUALIFIER unsigned long long size() {
        return details::grid::size();
    }

    // Rank of the calling thread within the whole grid.
    _CG_STATIC_QUALIFIER unsigned long long thread_rank() {
        return details::grid::thread_rank();
    }

    // Grid dimensions in blocks (legacy name; see dim_blocks()).
    _CG_STATIC_QUALIFIER dim3 group_dim() {
        return details::grid::grid_dim();
    }

    // Total number of threads in the grid.
    _CG_STATIC_QUALIFIER unsigned long long num_threads() {
        return details::grid::num_threads();
    }

    // Grid dimensions expressed in blocks.
    _CG_STATIC_QUALIFIER dim3 dim_blocks() {
        return details::grid::dim_blocks();
    }

    // Total number of blocks in the grid.
    _CG_STATIC_QUALIFIER unsigned long long num_blocks() {
        return details::grid::num_blocks();
    }

    // 3D index of the calling thread's block within the grid.
    _CG_STATIC_QUALIFIER dim3 block_index() {
        return details::grid::block_index();
    }

    // Linear rank of the calling thread's block within the grid.
    _CG_STATIC_QUALIFIER unsigned long long block_rank() {
        return details::grid::block_rank();
    }

# if defined(_CG_HAS_CLUSTER_GROUP)
    // Cluster-related queries, available only on architectures with
    // thread-block clusters.
    _CG_STATIC_QUALIFIER dim3 dim_clusters() {
        return details::grid::dim_clusters();
    }

    _CG_STATIC_QUALIFIER unsigned long long num_clusters() {
        return details::grid::num_clusters();
    }

    _CG_STATIC_QUALIFIER dim3 cluster_index() {
        return details::grid::cluster_index();
    }

    _CG_STATIC_QUALIFIER unsigned long long cluster_rank() {
        return details::grid::cluster_rank();
    }
# endif
};
|
| 395 |
+
|
| 396 |
+
/**
 * grid_group this_grid()
 *
 * Constructs the grid-wide group from the driver-provided workspace.
 */
_CG_QUALIFIER grid_group this_grid() {
    // Load a workspace from the driver
    grid_group result(details::get_grid_workspace());
#ifdef _CG_DEBUG
    // *all* threads must be available to synchronize
    result.sync();
#endif // _CG_DEBUG
    return result;
}
|
| 405 |
+
|
| 406 |
+
#if defined(_CG_HAS_CLUSTER_GROUP)
|
| 407 |
+
/**
|
| 408 |
+
* class cluster_group
|
| 409 |
+
*
|
| 410 |
+
* Every GPU kernel is executed by a grid of thread blocks. A grid can be evenly
|
| 411 |
+
* divided along all dimensions to form groups of blocks, each group of which is
|
| 412 |
+
* a block cluster. Clustered grids are subject to various restrictions and
|
| 413 |
+
* limitations. Primarily, a cluster consists of at most 8 blocks by default
|
| 414 |
+
* (although the user is allowed to opt-in to non-standard sizes,) and clustered
|
| 415 |
+
* grids are subject to additional occupancy limitations due to per-cluster
|
| 416 |
+
* hardware resource consumption. In exchange, a block cluster is guaranteed to
|
| 417 |
+
* be a cooperative group, with access to all cooperative group capabilities, as
|
| 418 |
+
* well as cluster specific capabilities and accelerations. A cluster_group
|
| 419 |
+
* represents a block cluster.
|
| 420 |
+
*
|
| 421 |
+
* Constructed via this_cluster_group();
|
| 422 |
+
*/
|
| 423 |
+
class cluster_group : public thread_group_base<details::cluster_group_id>
{
    // Friends
    friend _CG_QUALIFIER cluster_group this_cluster();

    // Disable constructor — instances come only from this_cluster().
    _CG_QUALIFIER cluster_group()
    {
    }

public:
    //_CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_cluster)

    // Empty tag type returned by barrier_arrive(); the cluster barrier
    // keeps no per-thread state in this implementation.
    using arrival_token = struct {};

    // Functionality exposed by the group

    // Full arrive-and-wait synchronization of the cluster.
    _CG_STATIC_QUALIFIER void sync()
    {
        return details::cluster::sync();
    }

    // Split-phase barrier: signal arrival, returning an (empty) token.
    _CG_STATIC_QUALIFIER arrival_token barrier_arrive()
    {
        details::cluster::barrier_arrive();
        return arrival_token();
    }

    // Split-phase barrier: wait for all blocks to have arrived.
    _CG_STATIC_QUALIFIER void barrier_wait()
    {
        return details::cluster::barrier_wait();
    }

    // Token-consuming overload; the token carries no data, so it simply
    // forwards to the plain wait.
    _CG_STATIC_QUALIFIER void barrier_wait(arrival_token&&)
    {
        return details::cluster::barrier_wait();
    }

    // Rank of the block whose shared memory contains addr.
    _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr)
    {
        return details::cluster::query_shared_rank(addr);
    }

    // Map a shared-memory address into the address space of another
    // block (rank) in the cluster.
    template <typename T>
    _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank)
    {
        return details::cluster::map_shared_rank(addr, rank);
    }

    // 3D index of the calling block within the cluster.
    _CG_STATIC_QUALIFIER dim3 block_index()
    {
        return details::cluster::block_index();
    }

    // Linear rank of the calling block within the cluster.
    _CG_STATIC_QUALIFIER unsigned int block_rank()
    {
        return details::cluster::block_rank();
    }

    // Rank of the calling thread within the cluster.
    _CG_STATIC_QUALIFIER unsigned int thread_rank()
    {
        return details::cluster::thread_rank();
    }

    // Cluster dimensions in blocks.
    _CG_STATIC_QUALIFIER dim3 dim_blocks()
    {
        return details::cluster::dim_blocks();
    }

    // Number of blocks in the cluster.
    _CG_STATIC_QUALIFIER unsigned int num_blocks()
    {
        return details::cluster::num_blocks();
    }

    // Cluster dimensions in threads.
    _CG_STATIC_QUALIFIER dim3 dim_threads()
    {
        return details::cluster::dim_threads();
    }

    // Number of threads in the cluster.
    _CG_STATIC_QUALIFIER unsigned int num_threads()
    {
        return details::cluster::num_threads();
    }

    // Legacy aliases
    _CG_STATIC_QUALIFIER unsigned int size()
    {
        return num_threads();
    }
};
|
| 512 |
+
|
| 513 |
+
/*
|
| 514 |
+
* cluster_group this_cluster()
|
| 515 |
+
*
|
| 516 |
+
* Constructs a cluster_group
|
| 517 |
+
*/
|
| 518 |
+
// Builds the cluster_group for the calling thread's block cluster.
_CG_QUALIFIER cluster_group this_cluster()
{
    cluster_group group;
#ifdef _CG_DEBUG
    // Debug builds check that every thread can reach the barrier.
    group.sync();
#endif
    return group;
}
|
| 526 |
+
#endif
|
| 527 |
+
|
| 528 |
+
#if defined(_CG_CPP11_FEATURES)
|
| 529 |
+
class thread_block;
|
| 530 |
+
template <unsigned int MaxBlockSize>
|
| 531 |
+
_CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch);
|
| 532 |
+
#endif
|
| 533 |
+
|
| 534 |
+
/**
|
| 535 |
+
* class thread_block
|
| 536 |
+
*
|
| 537 |
+
* Every GPU kernel is executed by a grid of thread blocks, and threads within
|
| 538 |
+
* each block are guaranteed to reside on the same streaming multiprocessor.
|
| 539 |
+
* A thread_block represents a thread block whose dimensions are not known until runtime.
|
| 540 |
+
*
|
| 541 |
+
* Constructed via this_thread_block();
|
| 542 |
+
*/
|
| 543 |
+
class thread_block : public thread_group_base<details::thread_block_id>
{
    // Friends
    friend _CG_QUALIFIER thread_block this_thread_block();
    friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
    friend _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz);

#if defined(_CG_CPP11_FEATURES)
    template <unsigned int MaxBlockSize>
    friend _CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch);
    template <unsigned int Size>
    friend class __static_size_multi_warp_tile_base;

    // Scratch area used by multi-warp tiles; NULL-backed when the block was
    // created without user-provided block_tile_memory.
    details::multi_warp_scratch* const tile_memory;

    // Constructs a thread_block backed by user-provided scratch storage.
    template <unsigned int MaxBlockSize>
    _CG_QUALIFIER thread_block(block_tile_memory<MaxBlockSize>& scratch) :
        tile_memory(details::get_scratch_ptr(&scratch)) {
#ifdef _CG_DEBUG
        // The scratch must be sized for at least the actual block size.
        if (num_threads() > MaxBlockSize) {
            details::abort();
        }
#endif
#if !defined(_CG_HAS_RESERVED_SHARED)
        // Without reserved shared memory the barriers live in the scratch
        // area and must be initialized (and published via sync) up front.
        tile_memory->init_barriers(thread_rank());
        sync();
#endif
    }
#endif

    // Disable constructor
    _CG_QUALIFIER thread_block()
#if defined(_CG_CPP11_FEATURES)
    : tile_memory(details::get_scratch_ptr(NULL))
#endif
    { }

    // Internal Use
    // Builds a coalesced-style tile of tilesz threads containing the caller;
    // tilesz must be a power of two in [1, 32].
    _CG_QUALIFIER thread_group _get_tiled_threads(unsigned int tilesz) const {
        const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0);

        // Invalid, immediately fail
        if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) {
            details::abort();
            return (thread_block());
        }

        unsigned int mask;
        // First rank belonging to this tile (tilesz is a power of two).
        unsigned int base_offset = thread_rank() & (~(tilesz - 1));
        // The last tile of the block may be partial.
        unsigned int masklength = min((unsigned int)size() - base_offset, tilesz);

        // Mask of masklength ones, shifted to this tile's lane range.
        mask = (unsigned int)(-1) >> (32 - masklength);
        mask <<= (details::laneid() & ~(tilesz - 1));
        thread_group tile = thread_group(details::coalesced_group_id);
        tile._data.coalesced.mask = mask;
        tile._data.coalesced.size = __popc(mask);
        tile._data.coalesced.metaGroupSize = (details::cta::size() + tilesz - 1) / tilesz;
        tile._data.coalesced.metaGroupRank = details::cta::thread_rank() / tilesz;
        tile._data.coalesced.is_tiled = true;
        return (tile);
    }

public:
    _CG_STATIC_CONST_DECL unsigned int _group_id = details::thread_block_id;
    _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)

    // Block-wide barrier.
    _CG_STATIC_QUALIFIER void sync() {
        details::cta::sync();
    }

    // Number of threads in the block (legacy name; see num_threads()).
    _CG_STATIC_QUALIFIER unsigned int size() {
        return details::cta::size();
    }

    // Rank of the calling thread within the block.
    _CG_STATIC_QUALIFIER unsigned int thread_rank() {
        return details::cta::thread_rank();
    }

    // Additional functionality exposed by the group

    // 3D index of this block within the grid.
    _CG_STATIC_QUALIFIER dim3 group_index() {
        return details::cta::group_index();
    }

    // 3D index of the calling thread within the block.
    _CG_STATIC_QUALIFIER dim3 thread_index() {
        return details::cta::thread_index();
    }

    // Block dimensions (legacy name; see dim_threads()).
    _CG_STATIC_QUALIFIER dim3 group_dim() {
        return details::cta::block_dim();
    }

    // Block dimensions in threads.
    _CG_STATIC_QUALIFIER dim3 dim_threads() {
        return details::cta::dim_threads();
    }

    // Total number of threads in the block.
    _CG_STATIC_QUALIFIER unsigned int num_threads() {
        return details::cta::num_threads();
    }

};
|
| 643 |
+
|
| 644 |
+
/**
|
| 645 |
+
* thread_block this_thread_block()
|
| 646 |
+
*
|
| 647 |
+
* Constructs a thread_block group
|
| 648 |
+
*/
|
| 649 |
+
// Factory for the calling thread's block group (no multi-warp scratch).
_CG_QUALIFIER thread_block this_thread_block()
{
    thread_block block;
    return block;
}
|
| 653 |
+
|
| 654 |
+
#if defined(_CG_CPP11_FEATURES)
// Factory for the calling thread's block group, backed by user-provided
// scratch storage for multi-warp tiles.
template <unsigned int MaxBlockSize>
_CG_QUALIFIER thread_block this_thread_block(block_tile_memory<MaxBlockSize>& scratch) {
    thread_block block(scratch);
    return block;
}
#endif
|
| 660 |
+
|
| 661 |
+
/**
|
| 662 |
+
* class coalesced_group
|
| 663 |
+
*
|
| 664 |
+
* A group representing the current set of converged threads in a warp.
|
| 665 |
+
* The size of the group is not guaranteed and it may return a group of
|
| 666 |
+
* only one thread (itself).
|
| 667 |
+
*
|
| 668 |
+
* This group exposes warp-synchronous builtins.
|
| 669 |
+
* Constructed via coalesced_threads();
|
| 670 |
+
*/
|
| 671 |
+
class coalesced_group : public thread_group_base<details::coalesced_group_id>
{
private:
    friend _CG_QUALIFIER coalesced_group coalesced_threads();
    friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
    friend _CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz);
    friend class details::_coalesced_group_data_access;

    // Compresses laneMask (a warp-lane bitmask) into a group-relative mask:
    // bit i of the result corresponds to the group's i-th member.
    _CG_QUALIFIER unsigned int _packLanes(unsigned laneMask) const {
        unsigned int member_pack = 0;
        unsigned int member_rank = 0;
        for (int bit_idx = 0; bit_idx < 32; bit_idx++) {
            unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx);
            if (lane_bit) {
                if (laneMask & lane_bit)
                    member_pack |= 1 << member_rank;
                member_rank++;
            }
        }
        return (member_pack);
    }

    // Internal Use
    // Partitions this group into tiles of tilesz threads and returns the
    // tile containing the caller; tilesz must be a power of two <= 32.
    _CG_QUALIFIER coalesced_group _get_tiled_threads(unsigned int tilesz) const {
        const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0);

        // Invalid, immediately fail
        if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) {
            details::abort();
            return (coalesced_group(0));
        }
        // Requested tile is at least as large as the whole group: no split.
        if (size() <= tilesz) {
            return (*this);
        }

        if ((_data.coalesced.is_tiled == true) && pow2_tilesz) {
            // Already-tiled groups occupy contiguous lane ranges, so the
            // sub-tile mask can be computed arithmetically.
            unsigned int base_offset = (thread_rank() & (~(tilesz - 1)));
            unsigned int masklength = min((unsigned int)size() - base_offset, tilesz);
            unsigned int mask = (unsigned int)(-1) >> (32 - masklength);

            mask <<= (details::laneid() & ~(tilesz - 1));
            coalesced_group coalesced_tile = coalesced_group(mask);
            coalesced_tile._data.coalesced.metaGroupSize = size() / tilesz;
            coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz;
            coalesced_tile._data.coalesced.is_tiled = true;
            return (coalesced_tile);
        }
        else if ((_data.coalesced.is_tiled == false) && pow2_tilesz) {
            // Arbitrary (non-contiguous) membership: walk the member lanes
            // and collect the tilesz members belonging to the caller's tile.
            unsigned int mask = 0;
            unsigned int member_rank = 0;
            // Number of member lanes preceding this tile.
            int seen_lanes = (thread_rank() / tilesz) * tilesz;
            for (unsigned int bit_idx = 0; bit_idx < 32; bit_idx++) {
                unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx);
                if (lane_bit) {
                    if (seen_lanes <= 0 && member_rank < tilesz) {
                        mask |= lane_bit;
                        member_rank++;
                    }
                    seen_lanes--;
                }
            }
            coalesced_group coalesced_tile = coalesced_group(mask);
            // Override parent with the size of this group
            coalesced_tile._data.coalesced.metaGroupSize = (size() + tilesz - 1) / tilesz;
            coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz;
            return coalesced_tile;
        }
        else {
            // None in _CG_VERSION 1000
            details::abort();
        }

        return (coalesced_group(0));
    }

protected:
    // Builds a group from a warp lane mask; meta-group fields default to a
    // single (whole) partition.
    _CG_QUALIFIER coalesced_group(unsigned int mask) {
        _data.coalesced.mask = mask;
        _data.coalesced.size = __popc(mask);
        _data.coalesced.metaGroupRank = 0;
        _data.coalesced.metaGroupSize = 1;
        _data.coalesced.is_tiled = false;
    }

    // Warp lane mask of the group's members.
    _CG_QUALIFIER unsigned int get_mask() const {
        return (_data.coalesced.mask);
    }

public:
    _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id;
    _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)

    // Number of member lanes (popcount of the mask).
    _CG_QUALIFIER unsigned int num_threads() const {
        return _data.coalesced.size;
    }

    // Legacy alias for num_threads().
    _CG_QUALIFIER unsigned int size() const {
        return num_threads();
    }

    // Rank of the caller = number of member lanes below the caller's lane.
    _CG_QUALIFIER unsigned int thread_rank() const {
        return (__popc(_data.coalesced.mask & details::lanemask32_lt()));
    }

    // Rank of this group in the upper level of the hierarchy
    _CG_QUALIFIER unsigned int meta_group_rank() const {
        return _data.coalesced.metaGroupRank;
    }

    // Total num partitions created out of all CTAs when the group was created
    _CG_QUALIFIER unsigned int meta_group_size() const {
        return _data.coalesced.metaGroupSize;
    }

    // Warp-level barrier over the member lanes only.
    _CG_QUALIFIER void sync() const {
        __syncwarp(_data.coalesced.mask);
    }

#ifdef _CG_CPP11_FEATURES
    // Broadcast elem from the member with rank srcRank. Rank 0 maps to the
    // lowest member lane; __fns finds the srcRank-th set bit otherwise.
    template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
    _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const {
        unsigned int lane = (srcRank == 0) ? __ffs(_data.coalesced.mask) - 1 :
            (size() == 32) ? srcRank : __fns(_data.coalesced.mask, 0, (srcRank + 1));

        return details::tile::shuffle_dispatch<TyElem>::shfl(
            _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
    }

    // Read elem from the member delta ranks above the caller; out-of-range
    // reads return the caller's own value.
    template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
    _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const {
        if (size() == 32) {
            return details::tile::shuffle_dispatch<TyElem>::shfl_down(
                _CG_STL_NAMESPACE::forward<TyElem>(elem), 0xFFFFFFFF, delta, 32);
        }

        unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1);

        if (lane >= 32)
            lane = details::laneid();

        return details::tile::shuffle_dispatch<TyElem>::shfl(
            _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
    }

    // Read elem from the member delta ranks below the caller; out-of-range
    // reads return the caller's own value.
    template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
    _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, int delta) const {
        if (size() == 32) {
            return details::tile::shuffle_dispatch<TyElem>::shfl_up(
                _CG_STL_NAMESPACE::forward<TyElem>(elem), 0xFFFFFFFF, delta, 32);
        }

        unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1));
        if (lane >= 32)
            lane = details::laneid();

        return details::tile::shuffle_dispatch<TyElem>::shfl(
            _CG_STL_NAMESPACE::forward<TyElem>(elem), _data.coalesced.mask, lane, 32);
    }
#else
    // Pre-C++11 variants: restricted to arithmetic types.
    template <typename TyIntegral>
    _CG_QUALIFIER TyIntegral shfl(TyIntegral var, unsigned int src_rank) const {
        details::assert_if_not_arithmetic<TyIntegral>();
        unsigned int lane = (src_rank == 0) ? __ffs(_data.coalesced.mask) - 1 :
            (size() == 32) ? src_rank : __fns(_data.coalesced.mask, 0, (src_rank + 1));
        return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
    }

    template <typename TyIntegral>
    _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, int delta) const {
        details::assert_if_not_arithmetic<TyIntegral>();
        if (size() == 32) {
            return (__shfl_up_sync(0xFFFFFFFF, var, delta, 32));
        }
        unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1));
        if (lane >= 32) lane = details::laneid();
        return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
    }

    template <typename TyIntegral>
    _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, int delta) const {
        details::assert_if_not_arithmetic<TyIntegral>();
        if (size() == 32) {
            return (__shfl_down_sync(0xFFFFFFFF, var, delta, 32));
        }
        unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1);
        if (lane >= 32) lane = details::laneid();
        return (__shfl_sync(_data.coalesced.mask, var, lane, 32));
    }
#endif

    // True if any member's predicate is nonzero.
    _CG_QUALIFIER int any(int predicate) const {
        return (__ballot_sync(_data.coalesced.mask, predicate) != 0);
    }
    // True if every member's predicate is nonzero.
    _CG_QUALIFIER int all(int predicate) const {
        return (__ballot_sync(_data.coalesced.mask, predicate) == _data.coalesced.mask);
    }
    // Per-member predicate bitmask, packed to group-relative bit positions.
    _CG_QUALIFIER unsigned int ballot(int predicate) const {
        if (size() == 32) {
            return (__ballot_sync(0xFFFFFFFF, predicate));
        }
        unsigned int lane_ballot = __ballot_sync(_data.coalesced.mask, predicate);
        return (_packLanes(lane_ballot));
    }

#ifdef _CG_HAS_MATCH_COLLECTIVE

    // Group-relative mask of members holding the same value as the caller.
    template <typename TyIntegral>
    _CG_QUALIFIER unsigned int match_any(TyIntegral val) const {
        details::assert_if_not_arithmetic<TyIntegral>();
        if (size() == 32) {
            return (__match_any_sync(0xFFFFFFFF, val));
        }
        unsigned int lane_match = __match_any_sync(_data.coalesced.mask, val);
        return (_packLanes(lane_match));
    }

    // Group-relative match-all; pred is set when all members agree.
    template <typename TyIntegral>
    _CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const {
        details::assert_if_not_arithmetic<TyIntegral>();
        if (size() == 32) {
            return (__match_all_sync(0xFFFFFFFF, val, &pred));
        }
        unsigned int lane_match = __match_all_sync(_data.coalesced.mask, val, &pred);
        return (_packLanes(lane_match));
    }

#endif /* !_CG_HAS_MATCH_COLLECTIVE */

};
|
| 900 |
+
|
| 901 |
+
// Builds a coalesced_group from the warp's currently active lane mask.
_CG_QUALIFIER coalesced_group coalesced_threads()
{
    const unsigned int active = __activemask();
    return coalesced_group(active);
}
|
| 905 |
+
|
| 906 |
+
namespace details {
    // Compile-time check that a single-warp tile size is one of the
    // supported powers of two; only the listed specializations provide OK.
    template <unsigned int Size> struct verify_thread_block_tile_size;
    template <> struct verify_thread_block_tile_size<32> { typedef void OK; };
    template <> struct verify_thread_block_tile_size<16> { typedef void OK; };
    template <> struct verify_thread_block_tile_size<8> { typedef void OK; };
    template <> struct verify_thread_block_tile_size<4> { typedef void OK; };
    template <> struct verify_thread_block_tile_size<2> { typedef void OK; };
    template <> struct verify_thread_block_tile_size<1> { typedef void OK; };

#ifdef _CG_CPP11_FEATURES
    // C++11 trait forms of the tile-size predicates.
    template <unsigned int Size>
    using _is_power_of_2 = _CG_STL_NAMESPACE::integral_constant<bool, (Size & (Size - 1)) == 0>;

    // Tile fits in one warp.
    template <unsigned int Size>
    using _is_single_warp = _CG_STL_NAMESPACE::integral_constant<bool, Size <= 32>;
    // Tile spans multiple warps but no more than a full block.
    template <unsigned int Size>
    using _is_multi_warp =
        _CG_STL_NAMESPACE::integral_constant<bool, (Size > 32) && (Size <= 1024)>;

    template <unsigned int Size>
    using _is_valid_single_warp_tile =
        _CG_STL_NAMESPACE::integral_constant<bool, _is_power_of_2<Size>::value && _is_single_warp<Size>::value>;
    template <unsigned int Size>
    using _is_valid_multi_warp_tile =
        _CG_STL_NAMESPACE::integral_constant<bool, _is_power_of_2<Size>::value && _is_multi_warp<Size>::value>;
#else
    // Without C++11 features, multi-warp tiles are unsupported.
    template <unsigned int Size>
    struct _is_multi_warp {
        static const bool value = false;
    };
#endif
}
|
| 938 |
+
|
| 939 |
+
// Common base for tiles whose size is a compile-time constant; provides
// rank/size queries shared by single- and multi-warp tiles.
template <unsigned int Size>
class __static_size_tile_base
{
protected:
    _CG_STATIC_CONST_DECL unsigned int numThreads = Size;

public:
    _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block)

    // Rank of thread within tile
    // (numThreads is a power of two, so the bitmask is a cheap modulo).
    _CG_STATIC_QUALIFIER unsigned int thread_rank() {
        return (details::cta::thread_rank() & (numThreads - 1));
    }

    // Number of threads within tile
    _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int num_threads() {
        return numThreads;
    }

    // Legacy alias for num_threads().
    _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int size() {
        return num_threads();
    }
};
|
| 962 |
+
|
| 963 |
+
template <unsigned int Size>
|
| 964 |
+
class __static_size_thread_block_tile_base : public __static_size_tile_base<Size>
|
| 965 |
+
{
|
| 966 |
+
friend class details::_coalesced_group_data_access;
|
| 967 |
+
typedef details::tile::tile_helpers<Size> th;
|
| 968 |
+
|
| 969 |
+
#ifdef _CG_CPP11_FEATURES
|
| 970 |
+
static_assert(details::_is_valid_single_warp_tile<Size>::value, "Size must be one of 1/2/4/8/16/32");
|
| 971 |
+
#else
|
| 972 |
+
typedef typename details::verify_thread_block_tile_size<Size>::OK valid;
|
| 973 |
+
#endif
|
| 974 |
+
using __static_size_tile_base<Size>::numThreads;
|
| 975 |
+
_CG_STATIC_CONST_DECL unsigned int fullMask = 0xFFFFFFFF;
|
| 976 |
+
|
| 977 |
+
protected:
|
| 978 |
+
_CG_STATIC_QUALIFIER unsigned int build_mask() {
|
| 979 |
+
unsigned int mask = fullMask;
|
| 980 |
+
if (numThreads != 32) {
|
| 981 |
+
// [0,31] representing the current active thread in the warp
|
| 982 |
+
unsigned int laneId = details::laneid();
|
| 983 |
+
// shift mask according to the partition it belongs to
|
| 984 |
+
mask = th::tileMask << (laneId & ~(th::laneMask));
|
| 985 |
+
}
|
| 986 |
+
return (mask);
|
| 987 |
+
}
|
| 988 |
+
|
| 989 |
+
public:
|
| 990 |
+
_CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id;
|
| 991 |
+
|
| 992 |
+
_CG_STATIC_QUALIFIER void sync() {
|
| 993 |
+
__syncwarp(build_mask());
|
| 994 |
+
}
|
| 995 |
+
|
| 996 |
+
#ifdef _CG_CPP11_FEATURES
|
| 997 |
+
// PTX supported collectives
|
| 998 |
+
template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
|
| 999 |
+
_CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const {
|
| 1000 |
+
return details::tile::shuffle_dispatch<TyElem>::shfl(
|
| 1001 |
+
_CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), srcRank, numThreads);
|
| 1002 |
+
}
|
| 1003 |
+
|
| 1004 |
+
template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
|
| 1005 |
+
_CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const {
|
| 1006 |
+
return details::tile::shuffle_dispatch<TyElem>::shfl_down(
|
| 1007 |
+
_CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), delta, numThreads);
|
| 1008 |
+
}
|
| 1009 |
+
|
| 1010 |
+
template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
|
| 1011 |
+
_CG_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int delta) const {
|
| 1012 |
+
return details::tile::shuffle_dispatch<TyElem>::shfl_up(
|
| 1013 |
+
_CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), delta, numThreads);
|
| 1014 |
+
}
|
| 1015 |
+
|
| 1016 |
+
template <typename TyElem, typename TyRet = details::remove_qual<TyElem>>
|
| 1017 |
+
_CG_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int laneMask) const {
|
| 1018 |
+
return details::tile::shuffle_dispatch<TyElem>::shfl_xor(
|
| 1019 |
+
_CG_STL_NAMESPACE::forward<TyElem>(elem), build_mask(), laneMask, numThreads);
|
| 1020 |
+
}
|
| 1021 |
+
#else
|
| 1022 |
+
template <typename TyIntegral>
|
| 1023 |
+
_CG_QUALIFIER TyIntegral shfl(TyIntegral var, int srcRank) const {
|
| 1024 |
+
details::assert_if_not_arithmetic<TyIntegral>();
|
| 1025 |
+
return (__shfl_sync(build_mask(), var, srcRank, numThreads));
|
| 1026 |
+
}
|
| 1027 |
+
|
| 1028 |
+
template <typename TyIntegral>
|
| 1029 |
+
_CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, unsigned int delta) const {
|
| 1030 |
+
details::assert_if_not_arithmetic<TyIntegral>();
|
| 1031 |
+
return (__shfl_down_sync(build_mask(), var, delta, numThreads));
|
| 1032 |
+
}
|
| 1033 |
+
|
| 1034 |
+
template <typename TyIntegral>
|
| 1035 |
+
_CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, unsigned int delta) const {
|
| 1036 |
+
details::assert_if_not_arithmetic<TyIntegral>();
|
| 1037 |
+
return (__shfl_up_sync(build_mask(), var, delta, numThreads));
|
| 1038 |
+
}
|
| 1039 |
+
|
| 1040 |
+
template <typename TyIntegral>
|
| 1041 |
+
_CG_QUALIFIER TyIntegral shfl_xor(TyIntegral var, unsigned int laneMask) const {
|
| 1042 |
+
details::assert_if_not_arithmetic<TyIntegral>();
|
| 1043 |
+
return (__shfl_xor_sync(build_mask(), var, laneMask, numThreads));
|
| 1044 |
+
}
|
| 1045 |
+
#endif //_CG_CPP11_FEATURES
|
| 1046 |
+
|
| 1047 |
+
_CG_QUALIFIER int any(int predicate) const {
|
| 1048 |
+
unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
|
| 1049 |
+
return (lane_ballot != 0);
|
| 1050 |
+
}
|
| 1051 |
+
_CG_QUALIFIER int all(int predicate) const {
|
| 1052 |
+
unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
|
| 1053 |
+
return (lane_ballot == build_mask());
|
| 1054 |
+
}
|
| 1055 |
+
_CG_QUALIFIER unsigned int ballot(int predicate) const {
|
| 1056 |
+
unsigned int lane_ballot = __ballot_sync(build_mask(), predicate);
|
| 1057 |
+
return (lane_ballot >> (details::laneid() & (~(th::laneMask))));
|
| 1058 |
+
}
|
| 1059 |
+
|
| 1060 |
+
#ifdef _CG_HAS_MATCH_COLLECTIVE
|
| 1061 |
+
template <typename TyIntegral>
|
| 1062 |
+
_CG_QUALIFIER unsigned int match_any(TyIntegral val) const {
|
| 1063 |
+
details::assert_if_not_arithmetic<TyIntegral>();
|
| 1064 |
+
unsigned int lane_match = __match_any_sync(build_mask(), val);
|
| 1065 |
+
return (lane_match >> (details::laneid() & (~(th::laneMask))));
|
| 1066 |
+
}
|
| 1067 |
+
|
| 1068 |
+
template <typename TyIntegral>
|
| 1069 |
+
_CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const {
|
| 1070 |
+
details::assert_if_not_arithmetic<TyIntegral>();
|
| 1071 |
+
unsigned int lane_match = __match_all_sync(build_mask(), val, &pred);
|
| 1072 |
+
return (lane_match >> (details::laneid() & (~(th::laneMask))));
|
| 1073 |
+
}
|
| 1074 |
+
#endif
|
| 1075 |
+
|
| 1076 |
+
};
|
| 1077 |
+
|
| 1078 |
+
template <unsigned int Size, typename ParentT>
|
| 1079 |
+
class __static_parent_thread_block_tile_base
|
| 1080 |
+
{
|
| 1081 |
+
public:
|
| 1082 |
+
// Rank of this group in the upper level of the hierarchy
|
| 1083 |
+
_CG_STATIC_QUALIFIER unsigned int meta_group_rank() {
|
| 1084 |
+
return ParentT::thread_rank() / Size;
|
| 1085 |
+
}
|
| 1086 |
+
|
| 1087 |
+
// Total num partitions created out of all CTAs when the group was created
|
| 1088 |
+
_CG_STATIC_QUALIFIER unsigned int meta_group_size() {
|
| 1089 |
+
return (ParentT::size() + Size - 1) / Size;
|
| 1090 |
+
}
|
| 1091 |
+
};
|
| 1092 |
+
|
| 1093 |
+
/**
|
| 1094 |
+
* class thread_block_tile<unsigned int Size, ParentT = void>
|
| 1095 |
+
*
|
| 1096 |
+
* Statically-sized group type, representing one tile of a thread block.
|
| 1097 |
+
* The only specializations currently supported are those with native
|
| 1098 |
+
* hardware support (1/2/4/8/16/32)
|
| 1099 |
+
*
|
| 1100 |
+
* This group exposes warp-synchronous builtins.
|
| 1101 |
+
* Can only be constructed via tiled_partition<Size>(ParentT&)
|
| 1102 |
+
*/
|
| 1103 |
+
|
| 1104 |
+
template <unsigned int Size, typename ParentT = void>
|
| 1105 |
+
class __single_warp_thread_block_tile :
|
| 1106 |
+
public __static_size_thread_block_tile_base<Size>,
|
| 1107 |
+
public __static_parent_thread_block_tile_base<Size, ParentT>
|
| 1108 |
+
{
|
| 1109 |
+
typedef __static_parent_thread_block_tile_base<Size, ParentT> staticParentBaseT;
|
| 1110 |
+
friend class details::_coalesced_group_data_access;
|
| 1111 |
+
|
| 1112 |
+
protected:
|
| 1113 |
+
_CG_QUALIFIER __single_warp_thread_block_tile() { };
|
| 1114 |
+
_CG_QUALIFIER __single_warp_thread_block_tile(unsigned int, unsigned int) { };
|
| 1115 |
+
|
| 1116 |
+
_CG_STATIC_QUALIFIER unsigned int get_mask() {
|
| 1117 |
+
return __static_size_thread_block_tile_base<Size>::build_mask();
|
| 1118 |
+
}
|
| 1119 |
+
};
|
| 1120 |
+
|
| 1121 |
+
template <unsigned int Size>
|
| 1122 |
+
class __single_warp_thread_block_tile<Size, void> :
|
| 1123 |
+
public __static_size_thread_block_tile_base<Size>,
|
| 1124 |
+
public thread_group_base<details::coalesced_group_id>
|
| 1125 |
+
{
|
| 1126 |
+
_CG_STATIC_CONST_DECL unsigned int numThreads = Size;
|
| 1127 |
+
|
| 1128 |
+
template <unsigned int, typename ParentT> friend class __single_warp_thread_block_tile;
|
| 1129 |
+
friend class details::_coalesced_group_data_access;
|
| 1130 |
+
|
| 1131 |
+
typedef __static_size_thread_block_tile_base<numThreads> staticSizeBaseT;
|
| 1132 |
+
|
| 1133 |
+
protected:
|
| 1134 |
+
_CG_QUALIFIER __single_warp_thread_block_tile(unsigned int meta_group_rank, unsigned int meta_group_size) {
|
| 1135 |
+
_data.coalesced.mask = staticSizeBaseT::build_mask();
|
| 1136 |
+
_data.coalesced.size = numThreads;
|
| 1137 |
+
_data.coalesced.metaGroupRank = meta_group_rank;
|
| 1138 |
+
_data.coalesced.metaGroupSize = meta_group_size;
|
| 1139 |
+
_data.coalesced.is_tiled = true;
|
| 1140 |
+
}
|
| 1141 |
+
|
| 1142 |
+
_CG_QUALIFIER unsigned int get_mask() const {
|
| 1143 |
+
return (_data.coalesced.mask);
|
| 1144 |
+
}
|
| 1145 |
+
|
| 1146 |
+
public:
|
| 1147 |
+
using staticSizeBaseT::sync;
|
| 1148 |
+
using staticSizeBaseT::size;
|
| 1149 |
+
using staticSizeBaseT::num_threads;
|
| 1150 |
+
using staticSizeBaseT::thread_rank;
|
| 1151 |
+
|
| 1152 |
+
_CG_QUALIFIER unsigned int meta_group_rank() const {
|
| 1153 |
+
return _data.coalesced.metaGroupRank;
|
| 1154 |
+
}
|
| 1155 |
+
|
| 1156 |
+
_CG_QUALIFIER unsigned int meta_group_size() const {
|
| 1157 |
+
return _data.coalesced.metaGroupSize;
|
| 1158 |
+
}
|
| 1159 |
+
};
|
| 1160 |
+
|
| 1161 |
+
/**
|
| 1162 |
+
* Outer level API calls
|
| 1163 |
+
* void sync(GroupT) - see <group_type>.sync()
|
| 1164 |
+
* void thread_rank(GroupT) - see <group_type>.thread_rank()
|
| 1165 |
+
* void group_size(GroupT) - see <group_type>.size()
|
| 1166 |
+
*/
|
| 1167 |
+
template <class GroupT>
|
| 1168 |
+
_CG_QUALIFIER void sync(GroupT const &g)
|
| 1169 |
+
{
|
| 1170 |
+
g.sync();
|
| 1171 |
+
}
|
| 1172 |
+
|
| 1173 |
+
// TODO: Use a static dispatch to determine appropriate return type
|
| 1174 |
+
// C++03 is stuck with unsigned long long for now
|
| 1175 |
+
#ifdef _CG_CPP11_FEATURES
|
| 1176 |
+
template <class GroupT>
|
| 1177 |
+
_CG_QUALIFIER auto thread_rank(GroupT const& g) -> decltype(g.thread_rank()) {
|
| 1178 |
+
return g.thread_rank();
|
| 1179 |
+
}
|
| 1180 |
+
|
| 1181 |
+
|
| 1182 |
+
template <class GroupT>
|
| 1183 |
+
_CG_QUALIFIER auto group_size(GroupT const &g) -> decltype(g.num_threads()) {
|
| 1184 |
+
return g.num_threads();
|
| 1185 |
+
}
|
| 1186 |
+
#else
|
| 1187 |
+
template <class GroupT>
|
| 1188 |
+
_CG_QUALIFIER unsigned long long thread_rank(GroupT const& g) {
|
| 1189 |
+
return static_cast<unsigned long long>(g.thread_rank());
|
| 1190 |
+
}
|
| 1191 |
+
|
| 1192 |
+
|
| 1193 |
+
template <class GroupT>
|
| 1194 |
+
_CG_QUALIFIER unsigned long long group_size(GroupT const &g) {
|
| 1195 |
+
return static_cast<unsigned long long>(g.num_threads());
|
| 1196 |
+
}
|
| 1197 |
+
#endif
|
| 1198 |
+
|
| 1199 |
+
|
| 1200 |
+
/**
|
| 1201 |
+
* tiled_partition
|
| 1202 |
+
*
|
| 1203 |
+
* The tiled_partition(parent, tilesz) method is a collective operation that
|
| 1204 |
+
* partitions the parent group into a one-dimensional, row-major, tiling of subgroups.
|
| 1205 |
+
*
|
| 1206 |
+
* A total of ((size(parent)+tilesz-1)/tilesz) subgroups will
|
| 1207 |
+
* be created where threads having identical k = (thread_rank(parent)/tilesz)
|
| 1208 |
+
* will be members of the same subgroup.
|
| 1209 |
+
*
|
| 1210 |
+
* The implementation may cause the calling thread to wait until all the members
|
| 1211 |
+
* of the parent group have invoked the operation before resuming execution.
|
| 1212 |
+
*
|
| 1213 |
+
* Functionality is limited to power-of-two sized subgorup instances of at most
|
| 1214 |
+
* 32 threads. Only thread_block, thread_block_tile<>, and their subgroups can be
|
| 1215 |
+
* tiled_partition() in _CG_VERSION 1000.
|
| 1216 |
+
*/
|
| 1217 |
+
_CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz)
|
| 1218 |
+
{
|
| 1219 |
+
if (parent.get_type() == details::coalesced_group_id) {
|
| 1220 |
+
const coalesced_group *_cg = static_cast<const coalesced_group*>(&parent);
|
| 1221 |
+
return _cg->_get_tiled_threads(tilesz);
|
| 1222 |
+
}
|
| 1223 |
+
else {
|
| 1224 |
+
const thread_block *_tb = static_cast<const thread_block*>(&parent);
|
| 1225 |
+
return _tb->_get_tiled_threads(tilesz);
|
| 1226 |
+
}
|
| 1227 |
+
}
|
| 1228 |
+
|
| 1229 |
+
// Thread block type overload: returns a basic thread_group for now (may be specialized later)
|
| 1230 |
+
_CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz)
|
| 1231 |
+
{
|
| 1232 |
+
return (parent._get_tiled_threads(tilesz));
|
| 1233 |
+
}
|
| 1234 |
+
|
| 1235 |
+
// Coalesced group type overload: retains its ability to stay coalesced
|
| 1236 |
+
_CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz)
|
| 1237 |
+
{
|
| 1238 |
+
return (parent._get_tiled_threads(tilesz));
|
| 1239 |
+
}
|
| 1240 |
+
|
| 1241 |
+
namespace details {
|
| 1242 |
+
template <unsigned int Size, typename ParentT>
|
| 1243 |
+
class internal_thread_block_tile : public __single_warp_thread_block_tile<Size, ParentT> {};
|
| 1244 |
+
|
| 1245 |
+
template <unsigned int Size, typename ParentT>
|
| 1246 |
+
_CG_QUALIFIER internal_thread_block_tile<Size, ParentT> tiled_partition_internal() {
|
| 1247 |
+
return internal_thread_block_tile<Size, ParentT>();
|
| 1248 |
+
}
|
| 1249 |
+
|
| 1250 |
+
template <typename TyVal, typename GroupT, typename WarpLambda, typename InterWarpLambda>
|
| 1251 |
+
_CG_QUALIFIER TyVal multi_warp_collectives_helper(
|
| 1252 |
+
const GroupT& group,
|
| 1253 |
+
WarpLambda warp_lambda,
|
| 1254 |
+
InterWarpLambda inter_warp_lambda) {
|
| 1255 |
+
return group.template collectives_scheme<TyVal>(warp_lambda, inter_warp_lambda);
|
| 1256 |
+
}
|
| 1257 |
+
|
| 1258 |
+
template <typename T, typename GroupT>
|
| 1259 |
+
_CG_QUALIFIER T* multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id) {
|
| 1260 |
+
return group.template get_scratch_location<T>(warp_id);
|
| 1261 |
+
}
|
| 1262 |
+
|
| 1263 |
+
template <typename GroupT>
|
| 1264 |
+
_CG_QUALIFIER details::barrier_t* multi_warp_sync_location_getter(const GroupT& group) {
|
| 1265 |
+
return group.get_sync_location();
|
| 1266 |
+
}
|
| 1267 |
+
|
| 1268 |
+
}
|
| 1269 |
+
/**
|
| 1270 |
+
* tiled_partition<tilesz>
|
| 1271 |
+
*
|
| 1272 |
+
* The tiled_partition<tilesz>(parent) method is a collective operation that
|
| 1273 |
+
* partitions the parent group into a one-dimensional, row-major, tiling of subgroups.
|
| 1274 |
+
*
|
| 1275 |
+
* A total of ((size(parent)/tilesz) subgroups will be created,
|
| 1276 |
+
* therefore the parent group size must be evenly divisible by the tilesz.
|
| 1277 |
+
* The allow parent groups are thread_block or thread_block_tile<size>.
|
| 1278 |
+
*
|
| 1279 |
+
* The implementation may cause the calling thread to wait until all the members
|
| 1280 |
+
* of the parent group have invoked the operation before resuming execution.
|
| 1281 |
+
*
|
| 1282 |
+
* Functionality is limited to native hardware sizes, 1/2/4/8/16/32.
|
| 1283 |
+
* The size(parent) must be greater than the template Size parameter
|
| 1284 |
+
* otherwise the results are undefined.
|
| 1285 |
+
*/
|
| 1286 |
+
|
| 1287 |
+
#if defined(_CG_CPP11_FEATURES)
|
| 1288 |
+
template <unsigned int Size>
|
| 1289 |
+
class __static_size_multi_warp_tile_base : public __static_size_tile_base<Size>
|
| 1290 |
+
{
|
| 1291 |
+
static_assert(details::_is_valid_multi_warp_tile<Size>::value, "Size must be one of 64/128/256/512");
|
| 1292 |
+
|
| 1293 |
+
template <typename TyVal, typename GroupT, typename WarpLambda, typename InterWarpLambda>
|
| 1294 |
+
friend __device__ TyVal details::multi_warp_collectives_helper(
|
| 1295 |
+
const GroupT& group,
|
| 1296 |
+
WarpLambda warp_lambda,
|
| 1297 |
+
InterWarpLambda inter_warp_lambda);
|
| 1298 |
+
template <typename T, typename GroupT>
|
| 1299 |
+
friend __device__ T* details::multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id);
|
| 1300 |
+
template <typename GroupT>
|
| 1301 |
+
friend __device__ details::barrier_t* details::multi_warp_sync_location_getter(const GroupT& group);
|
| 1302 |
+
template <unsigned int OtherSize>
|
| 1303 |
+
friend class __static_size_multi_warp_tile_base;
|
| 1304 |
+
using WarpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base<Size>>;
|
| 1305 |
+
using ThisType = __static_size_multi_warp_tile_base<Size>;
|
| 1306 |
+
_CG_STATIC_CONST_DECL int numWarps = Size / 32;
|
| 1307 |
+
|
| 1308 |
+
protected:
|
| 1309 |
+
details::multi_warp_scratch* const tile_memory;
|
| 1310 |
+
|
| 1311 |
+
template <typename GroupT>
|
| 1312 |
+
_CG_QUALIFIER __static_size_multi_warp_tile_base(const GroupT& g) : tile_memory(g.tile_memory) {
|
| 1313 |
+
#if defined(_CG_HAS_RESERVED_SHARED)
|
| 1314 |
+
details::sync_warps_reset(get_sync_location(), details::cta::thread_rank());
|
| 1315 |
+
g.sync();
|
| 1316 |
+
#endif
|
| 1317 |
+
}
|
| 1318 |
+
|
| 1319 |
+
|
| 1320 |
+
private:
|
| 1321 |
+
_CG_QUALIFIER details::barrier_t* get_sync_location() const {
|
| 1322 |
+
// Different group sizes use different barriers, all groups of a given size share one barrier.
|
| 1323 |
+
unsigned int sync_id = details::log2(Size / 64);
|
| 1324 |
+
return &tile_memory->barriers[sync_id];
|
| 1325 |
+
}
|
| 1326 |
+
|
| 1327 |
+
template <typename T>
|
| 1328 |
+
_CG_QUALIFIER T* get_scratch_location(unsigned int warp_id) const {
|
| 1329 |
+
unsigned int scratch_id = (details::cta::thread_rank() - thread_rank()) / 32 + warp_id;
|
| 1330 |
+
return reinterpret_cast<T*>(&tile_memory->communication_memory[scratch_id]);
|
| 1331 |
+
}
|
| 1332 |
+
|
| 1333 |
+
template <typename T>
|
| 1334 |
+
_CG_QUALIFIER T* get_scratch_location() const {
|
| 1335 |
+
unsigned int scratch_id = details::cta::thread_rank() / 32;
|
| 1336 |
+
return reinterpret_cast<T*>(&tile_memory->communication_memory[scratch_id]);
|
| 1337 |
+
}
|
| 1338 |
+
|
| 1339 |
+
template <typename TyVal>
|
| 1340 |
+
_CG_QUALIFIER TyVal shfl_impl(TyVal val, unsigned int src) const {
|
| 1341 |
+
unsigned int src_warp = src / 32;
|
| 1342 |
+
auto warp = details::tiled_partition_internal<32, ThisType>();
|
| 1343 |
+
details::barrier_t* sync_location = get_sync_location();
|
| 1344 |
+
|
| 1345 |
+
// Get warp slot of the source threads warp.
|
| 1346 |
+
TyVal* warp_scratch_location = get_scratch_location<TyVal>(src_warp);
|
| 1347 |
+
|
| 1348 |
+
if (warp.meta_group_rank() == src_warp) {
|
| 1349 |
+
warp.sync();
|
| 1350 |
+
// Put shuffled value into my warp slot and let my warp arrive at the barrier.
|
| 1351 |
+
if (thread_rank() == src) {
|
| 1352 |
+
*warp_scratch_location = val;
|
| 1353 |
+
}
|
| 1354 |
+
details::sync_warps_arrive(sync_location, details::cta::thread_rank(), numWarps);
|
| 1355 |
+
TyVal result = *warp_scratch_location;
|
| 1356 |
+
details::sync_warps_wait(sync_location, details::cta::thread_rank());
|
| 1357 |
+
return result;
|
| 1358 |
+
}
|
| 1359 |
+
else {
|
| 1360 |
+
// Wait for the source warp to arrive on the barrier.
|
| 1361 |
+
details::sync_warps_wait_for_specific_warp(sync_location,
|
| 1362 |
+
(details::cta::thread_rank() / 32 - warp.meta_group_rank() + src_warp));
|
| 1363 |
+
TyVal result = *warp_scratch_location;
|
| 1364 |
+
details::sync_warps(sync_location, details::cta::thread_rank(), numWarps);
|
| 1365 |
+
return result;
|
| 1366 |
+
}
|
| 1367 |
+
}
|
| 1368 |
+
|
| 1369 |
+
template <typename TyVal, typename WarpLambda, typename InterWarpLambda>
|
| 1370 |
+
_CG_QUALIFIER TyVal collectives_scheme(const WarpLambda& warp_lambda, const InterWarpLambda& inter_warp_lambda) const {
|
| 1371 |
+
static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size,
|
| 1372 |
+
"Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes");
|
| 1373 |
+
auto warp = details::tiled_partition_internal<32, ThisType>();
|
| 1374 |
+
details::barrier_t* sync_location = get_sync_location();
|
| 1375 |
+
TyVal* warp_scratch_location = get_scratch_location<TyVal>();
|
| 1376 |
+
|
| 1377 |
+
warp_lambda(warp, warp_scratch_location);
|
| 1378 |
+
|
| 1379 |
+
if (details::sync_warps_last_releases(sync_location, details::cta::thread_rank(), numWarps)) {
|
| 1380 |
+
auto subwarp = details::tiled_partition_internal<numWarps, decltype(warp)>();
|
| 1381 |
+
if (subwarp.meta_group_rank() == 0) {
|
| 1382 |
+
TyVal* thread_scratch_location = get_scratch_location<TyVal>(subwarp.thread_rank());
|
| 1383 |
+
inter_warp_lambda(subwarp, thread_scratch_location);
|
| 1384 |
+
}
|
| 1385 |
+
warp.sync();
|
| 1386 |
+
details::sync_warps_release(sync_location, warp.thread_rank() == 0, details::cta::thread_rank(), numWarps);
|
| 1387 |
+
}
|
| 1388 |
+
TyVal result = *warp_scratch_location;
|
| 1389 |
+
return result;
|
| 1390 |
+
}
|
| 1391 |
+
|
| 1392 |
+
public:
|
| 1393 |
+
_CG_STATIC_CONST_DECL unsigned int _group_id = details::multi_tile_group_id;
|
| 1394 |
+
|
| 1395 |
+
using __static_size_tile_base<Size>::thread_rank;
|
| 1396 |
+
|
| 1397 |
+
template <typename TyVal>
|
| 1398 |
+
_CG_QUALIFIER TyVal shfl(TyVal val, unsigned int src) const {
|
| 1399 |
+
static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size,
|
| 1400 |
+
"Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes");
|
| 1401 |
+
return shfl_impl(val, src);
|
| 1402 |
+
}
|
| 1403 |
+
|
| 1404 |
+
_CG_QUALIFIER void sync() const {
|
| 1405 |
+
details::sync_warps(get_sync_location(), details::cta::thread_rank(), numWarps);
|
| 1406 |
+
}
|
| 1407 |
+
|
| 1408 |
+
_CG_QUALIFIER int any(int predicate) const {
|
| 1409 |
+
auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) {
|
| 1410 |
+
*warp_scratch_location = __any_sync(0xFFFFFFFF, predicate);
|
| 1411 |
+
};
|
| 1412 |
+
auto inter_warp_lambda =
|
| 1413 |
+
[] (details::internal_thread_block_tile<numWarps, WarpType>& subwarp, int* thread_scratch_location) {
|
| 1414 |
+
*thread_scratch_location = __any_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location);
|
| 1415 |
+
};
|
| 1416 |
+
return collectives_scheme<int>(warp_lambda, inter_warp_lambda);
|
| 1417 |
+
}
|
| 1418 |
+
|
| 1419 |
+
_CG_QUALIFIER int all(int predicate) const {
|
| 1420 |
+
auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) {
|
| 1421 |
+
*warp_scratch_location = __all_sync(0xFFFFFFFF, predicate);
|
| 1422 |
+
};
|
| 1423 |
+
auto inter_warp_lambda =
|
| 1424 |
+
[] (details::internal_thread_block_tile<numWarps, WarpType>& subwarp, int* thread_scratch_location) {
|
| 1425 |
+
*thread_scratch_location = __all_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location);
|
| 1426 |
+
};
|
| 1427 |
+
return collectives_scheme<int>(warp_lambda, inter_warp_lambda);
|
| 1428 |
+
}
|
| 1429 |
+
};
|
| 1430 |
+
|
| 1431 |
+
|
| 1432 |
+
template <unsigned int Size, typename ParentT = void>
|
| 1433 |
+
class __multi_warp_thread_block_tile :
|
| 1434 |
+
public __static_size_multi_warp_tile_base<Size>,
|
| 1435 |
+
public __static_parent_thread_block_tile_base<Size, ParentT>
|
| 1436 |
+
{
|
| 1437 |
+
typedef __static_parent_thread_block_tile_base<Size, ParentT> staticParentBaseT;
|
| 1438 |
+
typedef __static_size_multi_warp_tile_base<Size> staticTileBaseT;
|
| 1439 |
+
protected:
|
| 1440 |
+
_CG_QUALIFIER __multi_warp_thread_block_tile(const ParentT& g) :
|
| 1441 |
+
__static_size_multi_warp_tile_base<Size>(g) {}
|
| 1442 |
+
};
|
| 1443 |
+
|
| 1444 |
+
template <unsigned int Size>
|
| 1445 |
+
class __multi_warp_thread_block_tile<Size, void> : public __static_size_multi_warp_tile_base<Size>
|
| 1446 |
+
{
|
| 1447 |
+
const unsigned int metaGroupRank;
|
| 1448 |
+
const unsigned int metaGroupSize;
|
| 1449 |
+
|
| 1450 |
+
protected:
|
| 1451 |
+
template <unsigned int OtherSize, typename ParentT>
|
| 1452 |
+
_CG_QUALIFIER __multi_warp_thread_block_tile(const __multi_warp_thread_block_tile<OtherSize, ParentT>& g) :
|
| 1453 |
+
__static_size_multi_warp_tile_base<Size>(g), metaGroupRank(g.meta_group_rank()), metaGroupSize(g.meta_group_size()) {}
|
| 1454 |
+
|
| 1455 |
+
public:
|
| 1456 |
+
_CG_QUALIFIER unsigned int meta_group_rank() const {
|
| 1457 |
+
return metaGroupRank;
|
| 1458 |
+
}
|
| 1459 |
+
|
| 1460 |
+
_CG_QUALIFIER unsigned int meta_group_size() const {
|
| 1461 |
+
return metaGroupSize;
|
| 1462 |
+
}
|
| 1463 |
+
};
|
| 1464 |
+
#endif
|
| 1465 |
+
|
| 1466 |
+
template <unsigned int Size, typename ParentT = void>
|
| 1467 |
+
class thread_block_tile;
|
| 1468 |
+
|
| 1469 |
+
namespace details {
|
| 1470 |
+
template <unsigned int Size, typename ParentT, bool IsMultiWarp>
|
| 1471 |
+
class thread_block_tile_impl;
|
| 1472 |
+
|
| 1473 |
+
template <unsigned int Size, typename ParentT>
|
| 1474 |
+
class thread_block_tile_impl<Size, ParentT, false>: public __single_warp_thread_block_tile<Size, ParentT>
|
| 1475 |
+
{
|
| 1476 |
+
protected:
|
| 1477 |
+
template <unsigned int OtherSize, typename OtherParentT, bool OtherIsMultiWarp>
|
| 1478 |
+
_CG_QUALIFIER thread_block_tile_impl(const thread_block_tile_impl<OtherSize, OtherParentT, OtherIsMultiWarp>& g) :
|
| 1479 |
+
__single_warp_thread_block_tile<Size, ParentT>(g.meta_group_rank(), g.meta_group_size()) {}
|
| 1480 |
+
|
| 1481 |
+
_CG_QUALIFIER thread_block_tile_impl(const thread_block& g) :
|
| 1482 |
+
__single_warp_thread_block_tile<Size, ParentT>() {}
|
| 1483 |
+
};
|
| 1484 |
+
|
| 1485 |
+
#if defined(_CG_CPP11_FEATURES)
|
| 1486 |
+
template <unsigned int Size, typename ParentT>
|
| 1487 |
+
class thread_block_tile_impl<Size, ParentT, true> : public __multi_warp_thread_block_tile<Size, ParentT>
|
| 1488 |
+
{
|
| 1489 |
+
protected:
|
| 1490 |
+
template <typename GroupT>
|
| 1491 |
+
_CG_QUALIFIER thread_block_tile_impl(const GroupT& g) :
|
| 1492 |
+
__multi_warp_thread_block_tile<Size, ParentT>(g) {}
|
| 1493 |
+
};
|
| 1494 |
+
#else
|
| 1495 |
+
template <unsigned int Size, typename ParentT>
|
| 1496 |
+
class thread_block_tile_impl<Size, ParentT, true>
|
| 1497 |
+
{
|
| 1498 |
+
protected:
|
| 1499 |
+
template <typename GroupT>
|
| 1500 |
+
_CG_QUALIFIER thread_block_tile_impl(const GroupT& g) {}
|
| 1501 |
+
};
|
| 1502 |
+
#endif
|
| 1503 |
+
}
|
| 1504 |
+
|
| 1505 |
+
template <unsigned int Size, typename ParentT>
|
| 1506 |
+
class thread_block_tile : public details::thread_block_tile_impl<Size, ParentT, details::_is_multi_warp<Size>::value>
|
| 1507 |
+
{
|
| 1508 |
+
friend _CG_QUALIFIER thread_block_tile<1, void> this_thread();
|
| 1509 |
+
|
| 1510 |
+
protected:
|
| 1511 |
+
_CG_QUALIFIER thread_block_tile(const ParentT& g) :
|
| 1512 |
+
details::thread_block_tile_impl<Size, ParentT, details::_is_multi_warp<Size>::value>(g) {}
|
| 1513 |
+
|
| 1514 |
+
public:
|
| 1515 |
+
_CG_QUALIFIER operator thread_block_tile<Size, void>() const {
|
| 1516 |
+
return thread_block_tile<Size, void>(*this);
|
| 1517 |
+
}
|
| 1518 |
+
};
|
| 1519 |
+
|
| 1520 |
+
template <unsigned int Size>
|
| 1521 |
+
class thread_block_tile<Size, void> : public details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>
|
| 1522 |
+
{
|
| 1523 |
+
template <unsigned int, typename ParentT>
|
| 1524 |
+
friend class thread_block_tile;
|
| 1525 |
+
|
| 1526 |
+
protected:
|
| 1527 |
+
template <unsigned int OtherSize, typename OtherParentT>
|
| 1528 |
+
_CG_QUALIFIER thread_block_tile(const thread_block_tile<OtherSize, OtherParentT>& g) :
|
| 1529 |
+
details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>(g) {}
|
| 1530 |
+
|
| 1531 |
+
public:
|
| 1532 |
+
template <typename ParentT>
|
| 1533 |
+
_CG_QUALIFIER thread_block_tile(const thread_block_tile<Size, ParentT>& g) :
|
| 1534 |
+
details::thread_block_tile_impl<Size, void, details::_is_multi_warp<Size>::value>(g) {}
|
| 1535 |
+
};
|
| 1536 |
+
|
| 1537 |
+
namespace details {
|
| 1538 |
+
template <unsigned int Size, typename ParentT>
|
| 1539 |
+
struct tiled_partition_impl;
|
| 1540 |
+
|
| 1541 |
+
template <unsigned int Size>
|
| 1542 |
+
struct tiled_partition_impl<Size, thread_block> : public thread_block_tile<Size, thread_block> {
|
| 1543 |
+
_CG_QUALIFIER tiled_partition_impl(const thread_block& g) :
|
| 1544 |
+
thread_block_tile<Size, thread_block>(g) {}
|
| 1545 |
+
};
|
| 1546 |
+
|
| 1547 |
+
// ParentT = static thread_block_tile<ParentSize, GrandParent> specialization
|
| 1548 |
+
template <unsigned int Size, unsigned int ParentSize, typename GrandParent>
|
| 1549 |
+
struct tiled_partition_impl<Size, thread_block_tile<ParentSize, GrandParent> > :
|
| 1550 |
+
public thread_block_tile<Size, thread_block_tile<ParentSize, GrandParent> > {
|
| 1551 |
+
#ifdef _CG_CPP11_FEATURES
|
| 1552 |
+
static_assert(Size < ParentSize, "Tile size bigger or equal to the parent group size");
|
| 1553 |
+
#endif
|
| 1554 |
+
_CG_QUALIFIER tiled_partition_impl(const thread_block_tile<ParentSize, GrandParent>& g) :
|
| 1555 |
+
thread_block_tile<Size, thread_block_tile<ParentSize, GrandParent> >(g) {}
|
| 1556 |
+
};
|
| 1557 |
+
|
| 1558 |
+
}
|
| 1559 |
+
|
| 1560 |
+
template <unsigned int Size, typename ParentT>
|
| 1561 |
+
_CG_QUALIFIER thread_block_tile<Size, ParentT> tiled_partition(const ParentT& g)
|
| 1562 |
+
{
|
| 1563 |
+
return details::tiled_partition_impl<Size, ParentT>(g);
|
| 1564 |
+
}
|
| 1565 |
+
|
| 1566 |
+
/**
|
| 1567 |
+
* thread_group this_thread()
|
| 1568 |
+
*
|
| 1569 |
+
* Constructs a generic thread_group containing only the calling thread
|
| 1570 |
+
*/
|
| 1571 |
+
_CG_QUALIFIER thread_block_tile<1, void> this_thread()
|
| 1572 |
+
{
|
| 1573 |
+
// Make thread_block_tile<1, thread_block> parent of the returned group, so it will have its
|
| 1574 |
+
// meta group rank and size set to 0 and 1 respectively.
|
| 1575 |
+
return thread_block_tile<1, thread_block_tile<1, thread_block> >(this_thread_block());
|
| 1576 |
+
}
|
| 1577 |
+
|
| 1578 |
+
/**
|
| 1579 |
+
* <group_type>.sync()
|
| 1580 |
+
*
|
| 1581 |
+
* Executes a barrier across the group
|
| 1582 |
+
*
|
| 1583 |
+
* Implements both a compiler fence and an architectural fence to prevent,
|
| 1584 |
+
* memory reordering around the barrier.
|
| 1585 |
+
*/
|
| 1586 |
+
_CG_QUALIFIER void thread_group::sync() const
|
| 1587 |
+
{
|
| 1588 |
+
switch (_data.group.type) {
|
| 1589 |
+
case details::coalesced_group_id:
|
| 1590 |
+
cooperative_groups::sync(*static_cast<const coalesced_group*>(this));
|
| 1591 |
+
break;
|
| 1592 |
+
case details::thread_block_id:
|
| 1593 |
+
cooperative_groups::sync(*static_cast<const thread_block*>(this));
|
| 1594 |
+
break;
|
| 1595 |
+
case details::grid_group_id:
|
| 1596 |
+
cooperative_groups::sync(*static_cast<const grid_group*>(this));
|
| 1597 |
+
break;
|
| 1598 |
+
#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
|
| 1599 |
+
case details::multi_grid_group_id:
|
| 1600 |
+
cooperative_groups::sync(*static_cast<const multi_grid_group*>(this));
|
| 1601 |
+
break;
|
| 1602 |
+
#endif
|
| 1603 |
+
#if defined(_CG_HAS_CLUSTER_GROUP)
|
| 1604 |
+
case details::cluster_group_id:
|
| 1605 |
+
cooperative_groups::sync(*static_cast<const cluster_group*>(this));
|
| 1606 |
+
break;
|
| 1607 |
+
#endif
|
| 1608 |
+
default:
|
| 1609 |
+
break;
|
| 1610 |
+
}
|
| 1611 |
+
}
|
| 1612 |
+
|
| 1613 |
+
/**
|
| 1614 |
+
* <group_type>.size()
|
| 1615 |
+
*
|
| 1616 |
+
* Returns the total number of threads in the group.
|
| 1617 |
+
*/
|
| 1618 |
+
_CG_QUALIFIER unsigned long long thread_group::size() const
|
| 1619 |
+
{
|
| 1620 |
+
unsigned long long size = 0;
|
| 1621 |
+
switch (_data.group.type) {
|
| 1622 |
+
case details::coalesced_group_id:
|
| 1623 |
+
size = cooperative_groups::group_size(*static_cast<const coalesced_group*>(this));
|
| 1624 |
+
break;
|
| 1625 |
+
case details::thread_block_id:
|
| 1626 |
+
size = cooperative_groups::group_size(*static_cast<const thread_block*>(this));
|
| 1627 |
+
break;
|
| 1628 |
+
case details::grid_group_id:
|
| 1629 |
+
size = cooperative_groups::group_size(*static_cast<const grid_group*>(this));
|
| 1630 |
+
break;
|
| 1631 |
+
#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
|
| 1632 |
+
case details::multi_grid_group_id:
|
| 1633 |
+
size = cooperative_groups::group_size(*static_cast<const multi_grid_group*>(this));
|
| 1634 |
+
break;
|
| 1635 |
+
#endif
|
| 1636 |
+
#if defined(_CG_HAS_CLUSTER_GROUP)
|
| 1637 |
+
case details::cluster_group_id:
|
| 1638 |
+
size = cooperative_groups::group_size(*static_cast<const cluster_group*>(this));
|
| 1639 |
+
break;
|
| 1640 |
+
#endif
|
| 1641 |
+
default:
|
| 1642 |
+
break;
|
| 1643 |
+
}
|
| 1644 |
+
return size;
|
| 1645 |
+
}
|
| 1646 |
+
|
| 1647 |
+
/**
|
| 1648 |
+
* <group_type>.thread_rank()
|
| 1649 |
+
*
|
| 1650 |
+
* Returns the linearized rank of the calling thread along the interval [0, size()).
|
| 1651 |
+
*/
|
| 1652 |
+
_CG_QUALIFIER unsigned long long thread_group::thread_rank() const
|
| 1653 |
+
{
|
| 1654 |
+
unsigned long long rank = 0;
|
| 1655 |
+
switch (_data.group.type) {
|
| 1656 |
+
case details::coalesced_group_id:
|
| 1657 |
+
rank = cooperative_groups::thread_rank(*static_cast<const coalesced_group*>(this));
|
| 1658 |
+
break;
|
| 1659 |
+
case details::thread_block_id:
|
| 1660 |
+
rank = cooperative_groups::thread_rank(*static_cast<const thread_block*>(this));
|
| 1661 |
+
break;
|
| 1662 |
+
case details::grid_group_id:
|
| 1663 |
+
rank = cooperative_groups::thread_rank(*static_cast<const grid_group*>(this));
|
| 1664 |
+
break;
|
| 1665 |
+
#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
|
| 1666 |
+
case details::multi_grid_group_id:
|
| 1667 |
+
rank = cooperative_groups::thread_rank(*static_cast<const multi_grid_group*>(this));
|
| 1668 |
+
break;
|
| 1669 |
+
#endif
|
| 1670 |
+
#if defined(_CG_HAS_CLUSTER_GROUP)
|
| 1671 |
+
case details::cluster_group_id:
|
| 1672 |
+
rank = cooperative_groups::thread_rank(*static_cast<const cluster_group*>(this));
|
| 1673 |
+
break;
|
| 1674 |
+
#endif
|
| 1675 |
+
default:
|
| 1676 |
+
break;
|
| 1677 |
+
}
|
| 1678 |
+
return rank;
|
| 1679 |
+
}
|
| 1680 |
+
|
| 1681 |
+
_CG_END_NAMESPACE
|
| 1682 |
+
|
| 1683 |
+
#include <cooperative_groups/details/partitioning.h>
|
| 1684 |
+
#if (!defined(_MSC_VER) || defined(_WIN64))
|
| 1685 |
+
# include <cooperative_groups/details/invoke.h>
|
| 1686 |
+
#endif
|
| 1687 |
+
|
| 1688 |
+
# endif /* ! (__cplusplus, __CUDACC__) */
|
| 1689 |
+
|
| 1690 |
+
#endif /* !_COOPERATIVE_GROUPS_H_ */
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
|
| 2 |
+
*
|
| 3 |
+
* NOTICE TO LICENSEE:
|
| 4 |
+
*
|
| 5 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 6 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 11 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 12 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 13 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 14 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 15 |
+
* of the Licensed Deliverables to any third party without the express
|
| 16 |
+
* written consent of NVIDIA is prohibited.
|
| 17 |
+
*
|
| 18 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 19 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 20 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 21 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 22 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 23 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 24 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 25 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 26 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 27 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 28 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 29 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 30 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 31 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 32 |
+
*
|
| 33 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 34 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 35 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 36 |
+
* computer software documentation" as such terms are used in 48
|
| 37 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 38 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 39 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 40 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 41 |
+
* only those rights set forth herein.
|
| 42 |
+
*
|
| 43 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 44 |
+
* software must include, in the user documentation and internal
|
| 45 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 46 |
+
* Users Notice.
|
| 47 |
+
*/
|
| 48 |
+
|
| 49 |
+
#ifndef _CG_COALESCED_SCAN_H_
|
| 50 |
+
#define _CG_COALESCED_SCAN_H_
|
| 51 |
+
|
| 52 |
+
#include "info.h"
|
| 53 |
+
#include "helpers.h"
|
| 54 |
+
#include "cooperative_groups.h"
|
| 55 |
+
#include "partitioning.h"
|
| 56 |
+
#include "functional.h"
|
| 57 |
+
|
| 58 |
+
_CG_BEGIN_NAMESPACE
|
| 59 |
+
|
| 60 |
+
namespace details {
|
| 61 |
+
|
| 62 |
+
template <typename TyGroup, typename TyVal, typename TyOp>
|
| 63 |
+
_CG_QUALIFIER auto inclusive_scan_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
|
| 64 |
+
auto out = val;
|
| 65 |
+
for (int mask = 1; mask < group.size(); mask <<= 1) {
|
| 66 |
+
auto tmp = group.shfl_up(out, mask);
|
| 67 |
+
if (mask <= group.thread_rank()) {
|
| 68 |
+
out = op(out, tmp);
|
| 69 |
+
}
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
return out;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
template <typename TyGroup, typename TyVal, typename TyOp>
|
| 76 |
+
_CG_QUALIFIER auto inclusive_scan_non_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
|
| 77 |
+
const unsigned int groupSize = group.size();
|
| 78 |
+
auto out = val;
|
| 79 |
+
|
| 80 |
+
const unsigned int mask = details::_coalesced_group_data_access::get_mask(group);
|
| 81 |
+
unsigned int lanemask = details::lanemask32_lt() & mask;
|
| 82 |
+
unsigned int srcLane = details::laneid();
|
| 83 |
+
|
| 84 |
+
const unsigned int base = __ffs(mask)-1; /* lane with rank == 0 */
|
| 85 |
+
const unsigned int rank = __popc(lanemask);
|
| 86 |
+
|
| 87 |
+
for (unsigned int i = 1, j = 1; i < groupSize; i <<= 1) {
|
| 88 |
+
if (i <= rank) {
|
| 89 |
+
srcLane -= j;
|
| 90 |
+
j = i; /* maximum possible lane */
|
| 91 |
+
|
| 92 |
+
unsigned int begLane = base + rank - i; /* minimum possible lane */
|
| 93 |
+
|
| 94 |
+
/* Next source lane is in the range [ begLane .. srcLane ]
|
| 95 |
+
* If begLane < srcLane then do a binary search.
|
| 96 |
+
*/
|
| 97 |
+
while (begLane < srcLane) {
|
| 98 |
+
const unsigned int halfLane = (begLane + srcLane) >> 1;
|
| 99 |
+
const unsigned int halfMask = lanemask >> halfLane;
|
| 100 |
+
const unsigned int d = __popc(halfMask);
|
| 101 |
+
if (d < i) {
|
| 102 |
+
srcLane = halfLane - 1; /* halfLane too large */
|
| 103 |
+
}
|
| 104 |
+
else if ((i < d) || !(halfMask & 0x01)) {
|
| 105 |
+
begLane = halfLane + 1; /* halfLane too small */
|
| 106 |
+
}
|
| 107 |
+
else {
|
| 108 |
+
begLane = srcLane = halfLane; /* happen to hit */
|
| 109 |
+
}
|
| 110 |
+
}
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
auto tmp = details::tile::shuffle_dispatch<TyVal>::shfl(out, mask, srcLane, 32);
|
| 114 |
+
if (i <= rank) {
|
| 115 |
+
out = op(out, tmp);
|
| 116 |
+
}
|
| 117 |
+
}
|
| 118 |
+
return out;
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
template <unsigned int TySize, typename ParentT, typename TyVal, typename TyOp>
|
| 122 |
+
_CG_QUALIFIER auto coalesced_inclusive_scan(const __single_warp_thread_block_tile<TySize, ParentT>& group,
|
| 123 |
+
TyVal&& val,
|
| 124 |
+
TyOp&& op) -> decltype(op(val, val)) {
|
| 125 |
+
return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
template <typename TyVal, typename TyOp>
|
| 129 |
+
_CG_QUALIFIER auto coalesced_inclusive_scan(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
|
| 130 |
+
if (group.size() == 32) {
|
| 131 |
+
return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
|
| 132 |
+
}
|
| 133 |
+
else {
|
| 134 |
+
return inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
|
| 135 |
+
}
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
template <bool IntegralOptimized>
|
| 139 |
+
struct scan_choose_convertion;
|
| 140 |
+
|
| 141 |
+
template<>
|
| 142 |
+
struct scan_choose_convertion<true> {
|
| 143 |
+
template <typename TyGroup, typename TyRes, typename TyVal>
|
| 144 |
+
_CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
|
| 145 |
+
return result - val;
|
| 146 |
+
}
|
| 147 |
+
};
|
| 148 |
+
|
| 149 |
+
template<>
|
| 150 |
+
struct scan_choose_convertion<false> {
|
| 151 |
+
template <typename TyGroup, typename TyRes, typename TyVal>
|
| 152 |
+
_CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
|
| 153 |
+
auto ret = group.shfl_up(result, 1);
|
| 154 |
+
if (group.thread_rank() == 0) {
|
| 155 |
+
return {};
|
| 156 |
+
}
|
| 157 |
+
else {
|
| 158 |
+
return ret;
|
| 159 |
+
}
|
| 160 |
+
}
|
| 161 |
+
};
|
| 162 |
+
|
| 163 |
+
template <typename TyGroup, typename TyRes, typename TyVal, typename TyFn>
|
| 164 |
+
_CG_QUALIFIER auto convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
|
| 165 |
+
using conversion = scan_choose_convertion<_CG_STL_NAMESPACE::is_same<remove_qual<TyFn>, cooperative_groups::plus<remove_qual<TyVal>>>::value
|
| 166 |
+
&& _CG_STL_NAMESPACE::is_integral<remove_qual<TyVal>>::value>;
|
| 167 |
+
return conversion::convert_inclusive_to_exclusive(group, result, _CG_STL_NAMESPACE::forward<TyVal>(val));
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
} // details
|
| 171 |
+
|
| 172 |
+
_CG_END_NAMESPACE
|
| 173 |
+
|
| 174 |
+
#endif // _CG_COALESCED_SCAN_H_
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/invoke.h
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#ifndef _CG_INVOKE_H
|
| 51 |
+
#define _CG_INVOKE_H
|
| 52 |
+
|
| 53 |
+
#include "info.h"
|
| 54 |
+
#include "helpers.h"
|
| 55 |
+
|
| 56 |
+
#if defined(_CG_CPP11_FEATURES)
|
| 57 |
+
|
| 58 |
+
_CG_BEGIN_NAMESPACE
|
| 59 |
+
|
| 60 |
+
namespace details {
|
| 61 |
+
|
| 62 |
+
template <typename Group>
|
| 63 |
+
struct _elect_group_supported : _CG_STL_NAMESPACE::false_type {};
|
| 64 |
+
#ifdef _CG_HAS_INSTR_ELECT
|
| 65 |
+
template<>
|
| 66 |
+
struct _elect_group_supported<coalesced_group> : _CG_STL_NAMESPACE::true_type {};
|
| 67 |
+
template<unsigned int Size, typename Parent>
|
| 68 |
+
struct _elect_group_supported<thread_block_tile<Size, Parent>> :
|
| 69 |
+
_CG_STL_NAMESPACE::integral_constant<bool, (Size <= 32)> {};
|
| 70 |
+
#endif
|
| 71 |
+
|
| 72 |
+
template <typename Group>
|
| 73 |
+
struct elect_group_supported : public _elect_group_supported<details::remove_qual<Group>> {};
|
| 74 |
+
|
| 75 |
+
template<typename Group>
|
| 76 |
+
_CG_STATIC_QUALIFIER bool elect_one(const Group& group, unsigned int mask, unsigned int& leader_lane) {
|
| 77 |
+
int is_leader = 0;
|
| 78 |
+
#ifdef _CG_HAS_INSTR_ELECT
|
| 79 |
+
asm("{\n\t"
|
| 80 |
+
" .reg .pred p;\n\t"
|
| 81 |
+
" elect.sync %0|p, %2;\n\t"
|
| 82 |
+
" @p mov.s32 %1, 1;\n\t"
|
| 83 |
+
"}"
|
| 84 |
+
: "+r"(leader_lane), "+r"(is_leader) : "r" (mask));
|
| 85 |
+
#endif
|
| 86 |
+
return is_leader;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
template<bool UseElect>
|
| 90 |
+
struct invoke_one_impl {};
|
| 91 |
+
|
| 92 |
+
template<>
|
| 93 |
+
struct invoke_one_impl<true> {
|
| 94 |
+
template<typename Group, typename Fn, typename... Args>
|
| 95 |
+
_CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
|
| 96 |
+
auto mask = details::_coalesced_group_data_access::get_mask(group);
|
| 97 |
+
unsigned int leader_lane = 0;
|
| 98 |
+
|
| 99 |
+
if (elect_one(group, mask, leader_lane)) {
|
| 100 |
+
_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
|
| 101 |
+
}
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
template<typename Group, typename Fn, typename... Args>
|
| 105 |
+
_CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... args)
|
| 106 |
+
-> typename _CG_STL_NAMESPACE::remove_reference<
|
| 107 |
+
decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
|
| 108 |
+
|
| 109 |
+
using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
|
| 110 |
+
details::remove_qual<ResultType> result;
|
| 111 |
+
auto mask = details::_coalesced_group_data_access::get_mask(group);
|
| 112 |
+
unsigned int leader_lane = 0;
|
| 113 |
+
|
| 114 |
+
if (elect_one(group, mask, leader_lane)) {
|
| 115 |
+
result = _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
// Need to use low level api instead of group.shfl, because elect_one returns lane id, not group rank.
|
| 119 |
+
return tile::shuffle_dispatch<ResultType>::shfl(result, mask, leader_lane, 32);
|
| 120 |
+
}
|
| 121 |
+
};
|
| 122 |
+
|
| 123 |
+
template<>
|
| 124 |
+
struct invoke_one_impl<false> {
|
| 125 |
+
template<typename Group, typename Fn, typename... Args>
|
| 126 |
+
_CG_STATIC_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
|
| 127 |
+
if (group.thread_rank() == 0) {
|
| 128 |
+
_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
|
| 129 |
+
}
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
template<typename Group, typename Fn, typename... Args>
|
| 133 |
+
_CG_STATIC_QUALIFIER auto invoke_one_broadcast(const Group& group, Fn&& fn, Args&&... args)
|
| 134 |
+
-> typename _CG_STL_NAMESPACE::remove_reference<
|
| 135 |
+
decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
|
| 136 |
+
|
| 137 |
+
using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
|
| 138 |
+
details::remove_qual<ResultType> result;
|
| 139 |
+
|
| 140 |
+
if (group.thread_rank() == 0) {
|
| 141 |
+
result = _CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...);
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
return group.shfl(result, 0);
|
| 145 |
+
}
|
| 146 |
+
};
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
}; // namespace details
|
| 150 |
+
|
| 151 |
+
template<typename Group, typename Fn, typename... Args>
|
| 152 |
+
_CG_QUALIFIER void invoke_one(const Group& group, Fn&& fn, Args&&... args) {
|
| 153 |
+
using impl = details::invoke_one_impl<details::elect_group_supported<Group>::value>;
|
| 154 |
+
impl::invoke_one(group, _CG_STL_NAMESPACE::forward<Fn>(fn), _CG_STL_NAMESPACE::forward<Args>(args)...);
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
template<typename Fn, typename... Args>
|
| 158 |
+
_CG_QUALIFIER auto invoke_one_broadcast(const coalesced_group& group, Fn&& fn, Args&&... args)
|
| 159 |
+
-> typename _CG_STL_NAMESPACE::remove_reference<
|
| 160 |
+
decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
|
| 161 |
+
|
| 162 |
+
using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
|
| 163 |
+
static_assert(!_CG_STL_NAMESPACE::is_same<ResultType, void>::value,
|
| 164 |
+
"For invocables returning void invoke_one should be used instead");
|
| 165 |
+
using impl = details::invoke_one_impl<details::elect_group_supported<coalesced_group>::value>;
|
| 166 |
+
return impl::invoke_one_broadcast(group,
|
| 167 |
+
_CG_STL_NAMESPACE::forward<Fn>(fn),
|
| 168 |
+
_CG_STL_NAMESPACE::forward<Args>(args)...);
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
template<unsigned int Size, typename Parent, typename Fn, typename... Args>
|
| 172 |
+
_CG_QUALIFIER auto invoke_one_broadcast(const thread_block_tile<Size, Parent>& group, Fn&& fn, Args&&... args)
|
| 173 |
+
-> typename _CG_STL_NAMESPACE::remove_reference<
|
| 174 |
+
decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...))>::type {
|
| 175 |
+
|
| 176 |
+
using ResultType = decltype(_CG_STL_NAMESPACE::forward<Fn>(fn)(_CG_STL_NAMESPACE::forward<Args>(args)...));
|
| 177 |
+
static_assert(!_CG_STL_NAMESPACE::is_same<ResultType, void>::value,
|
| 178 |
+
"For invocables returning void invoke_one should be used instead");
|
| 179 |
+
using impl = details::invoke_one_impl<details::elect_group_supported<thread_block_tile<Size, Parent>>::value>;
|
| 180 |
+
return impl::invoke_one_broadcast(group,
|
| 181 |
+
_CG_STL_NAMESPACE::forward<Fn>(fn),
|
| 182 |
+
_CG_STL_NAMESPACE::forward<Args>(args)...);
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
_CG_END_NAMESPACE
|
| 186 |
+
|
| 187 |
+
#endif //_CG_CPP11_FEATURES
|
| 188 |
+
|
| 189 |
+
#endif // _CG_INVOKE_H
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/memcpy_async.h
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
|
| 2 |
+
*
|
| 3 |
+
* NOTICE TO LICENSEE:
|
| 4 |
+
*
|
| 5 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 6 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 11 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 12 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 13 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 14 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 15 |
+
* of the Licensed Deliverables to any third party without the express
|
| 16 |
+
* written consent of NVIDIA is prohibited.
|
| 17 |
+
*
|
| 18 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 19 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 20 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 21 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 22 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 23 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 24 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 25 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 26 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 27 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 28 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 29 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 30 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 31 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 32 |
+
*
|
| 33 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 34 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 35 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 36 |
+
* computer software documentation" as such terms are used in 48
|
| 37 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 38 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 39 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 40 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 41 |
+
* only those rights set forth herein.
|
| 42 |
+
*
|
| 43 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 44 |
+
* software must include, in the user documentation and internal
|
| 45 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 46 |
+
* Users Notice.
|
| 47 |
+
*/
|
| 48 |
+
|
| 49 |
+
#ifndef _COOPERATIVE_GROUPS_MEMCPY_ASYNC
|
| 50 |
+
#define _COOPERATIVE_GROUPS_MEMCPY_ASYNC
|
| 51 |
+
|
| 52 |
+
#include "../cooperative_groups.h"
|
| 53 |
+
#include "details/info.h"
|
| 54 |
+
|
| 55 |
+
#ifdef _CG_CPP11_FEATURES
|
| 56 |
+
# include "details/async.h"
|
| 57 |
+
#else
|
| 58 |
+
# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
|
| 59 |
+
-std=c++11 compiler option.
|
| 60 |
+
#endif
|
| 61 |
+
|
| 62 |
+
#endif // _COOPERATIVE_GROUPS_MEMCPY_ASYNC
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/reduce.h
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
|
| 2 |
+
*
|
| 3 |
+
* NOTICE TO LICENSEE:
|
| 4 |
+
*
|
| 5 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 6 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 11 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 12 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 13 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 14 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 15 |
+
* of the Licensed Deliverables to any third party without the express
|
| 16 |
+
* written consent of NVIDIA is prohibited.
|
| 17 |
+
*
|
| 18 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 19 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 20 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 21 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 22 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 23 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 24 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 25 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 26 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 27 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 28 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 29 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 30 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 31 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 32 |
+
*
|
| 33 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 34 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 35 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 36 |
+
* computer software documentation" as such terms are used in 48
|
| 37 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 38 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 39 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 40 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 41 |
+
* only those rights set forth herein.
|
| 42 |
+
*
|
| 43 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 44 |
+
* software must include, in the user documentation and internal
|
| 45 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 46 |
+
* Users Notice.
|
| 47 |
+
*/
|
| 48 |
+
|
| 49 |
+
#ifndef _COOPERATIVE_GROUPS_REDUCE_H
|
| 50 |
+
#define _COOPERATIVE_GROUPS_REDUCE_H
|
| 51 |
+
|
| 52 |
+
#include "../cooperative_groups.h"
|
| 53 |
+
#include "details/info.h"
|
| 54 |
+
|
| 55 |
+
#ifdef _CG_CPP11_FEATURES
|
| 56 |
+
# include "details/reduce.h"
|
| 57 |
+
#else
|
| 58 |
+
# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
|
| 59 |
+
-std=c++11 compiler option.
|
| 60 |
+
#endif
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
#endif //_COOPERATIVE_GROUPS_REDUCE_H
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/scan.h
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
|
| 2 |
+
*
|
| 3 |
+
* NOTICE TO LICENSEE:
|
| 4 |
+
*
|
| 5 |
+
* The source code and/or documentation ("Licensed Deliverables") are
|
| 6 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 7 |
+
* international Copyright laws.
|
| 8 |
+
*
|
| 9 |
+
* The Licensed Deliverables contained herein are PROPRIETARY and
|
| 10 |
+
* CONFIDENTIAL to NVIDIA and are being provided under the terms and
|
| 11 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 12 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 13 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 14 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 15 |
+
* of the Licensed Deliverables to any third party without the express
|
| 16 |
+
* written consent of NVIDIA is prohibited.
|
| 17 |
+
*
|
| 18 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 19 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 20 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
|
| 21 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 22 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 23 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 24 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 25 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 26 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 27 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 28 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 29 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 30 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 31 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 32 |
+
*
|
| 33 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 34 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 35 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 36 |
+
* computer software documentation" as such terms are used in 48
|
| 37 |
+
* C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
|
| 38 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 39 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 40 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 41 |
+
* only those rights set forth herein.
|
| 42 |
+
*
|
| 43 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 44 |
+
* software must include, in the user documentation and internal
|
| 45 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 46 |
+
* Users Notice.
|
| 47 |
+
*/
|
| 48 |
+
|
| 49 |
+
#ifndef _COOPERATIVE_GROUPS_SCAN_H
|
| 50 |
+
#define _COOPERATIVE_GROUPS_SCAN_H
|
| 51 |
+
|
| 52 |
+
#include "../cooperative_groups.h"
|
| 53 |
+
#include "details/info.h"
|
| 54 |
+
|
| 55 |
+
#ifdef _CG_CPP11_FEATURES
|
| 56 |
+
# include "details/scan.h"
|
| 57 |
+
#else
|
| 58 |
+
# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
|
| 59 |
+
-std=c++11 compiler option.
|
| 60 |
+
#endif
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
#endif //_COOPERATIVE_GROUPS_SCAN_H
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h
ADDED
|
@@ -0,0 +1,348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(CU_COMPLEX_H_)
|
| 51 |
+
#define CU_COMPLEX_H_
|
| 52 |
+
|
| 53 |
+
#if !defined(__CUDACC_RTC__)
|
| 54 |
+
#if defined(__GNUC__)
|
| 55 |
+
#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)))
|
| 56 |
+
#pragma GCC diagnostic ignored "-Wunused-function"
|
| 57 |
+
#endif
|
| 58 |
+
#endif
|
| 59 |
+
#endif
|
| 60 |
+
|
| 61 |
+
/* When trying to include C header file in C++ Code extern "C" is required
|
| 62 |
+
* But the Standard QNX headers already have ifdef extern in them when compiling C++ Code
|
| 63 |
+
* extern "C" cannot be nested
|
| 64 |
+
* Hence keep the header out of extern "C" block
|
| 65 |
+
*/
|
| 66 |
+
|
| 67 |
+
#if !defined(__CUDACC__)
|
| 68 |
+
#include <math.h> /* import fabsf, sqrt */
|
| 69 |
+
#endif /* !defined(__CUDACC__) */
|
| 70 |
+
|
| 71 |
+
#if defined(__cplusplus)
|
| 72 |
+
extern "C" {
|
| 73 |
+
#endif /* __cplusplus */
|
| 74 |
+
|
| 75 |
+
#include "vector_types.h"
|
| 76 |
+
|
| 77 |
+
typedef float2 cuFloatComplex;
|
| 78 |
+
|
| 79 |
+
__host__ __device__ static __inline__ float cuCrealf (cuFloatComplex x)
|
| 80 |
+
{
|
| 81 |
+
return x.x;
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
__host__ __device__ static __inline__ float cuCimagf (cuFloatComplex x)
|
| 85 |
+
{
|
| 86 |
+
return x.y;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
__host__ __device__ static __inline__ cuFloatComplex make_cuFloatComplex
|
| 90 |
+
(float r, float i)
|
| 91 |
+
{
|
| 92 |
+
cuFloatComplex res;
|
| 93 |
+
res.x = r;
|
| 94 |
+
res.y = i;
|
| 95 |
+
return res;
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
__host__ __device__ static __inline__ cuFloatComplex cuConjf (cuFloatComplex x)
|
| 99 |
+
{
|
| 100 |
+
return make_cuFloatComplex (cuCrealf(x), -cuCimagf(x));
|
| 101 |
+
}
|
| 102 |
+
__host__ __device__ static __inline__ cuFloatComplex cuCaddf (cuFloatComplex x,
|
| 103 |
+
cuFloatComplex y)
|
| 104 |
+
{
|
| 105 |
+
return make_cuFloatComplex (cuCrealf(x) + cuCrealf(y),
|
| 106 |
+
cuCimagf(x) + cuCimagf(y));
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
__host__ __device__ static __inline__ cuFloatComplex cuCsubf (cuFloatComplex x,
|
| 110 |
+
cuFloatComplex y)
|
| 111 |
+
{
|
| 112 |
+
return make_cuFloatComplex (cuCrealf(x) - cuCrealf(y),
|
| 113 |
+
cuCimagf(x) - cuCimagf(y));
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
/* This implementation could suffer from intermediate overflow even though
|
| 117 |
+
* the final result would be in range. However, various implementations do
|
| 118 |
+
* not guard against this (presumably to avoid losing performance), so we
|
| 119 |
+
* don't do it either to stay competitive.
|
| 120 |
+
*/
|
| 121 |
+
__host__ __device__ static __inline__ cuFloatComplex cuCmulf (cuFloatComplex x,
|
| 122 |
+
cuFloatComplex y)
|
| 123 |
+
{
|
| 124 |
+
cuFloatComplex prod;
|
| 125 |
+
prod = make_cuFloatComplex ((cuCrealf(x) * cuCrealf(y)) -
|
| 126 |
+
(cuCimagf(x) * cuCimagf(y)),
|
| 127 |
+
(cuCrealf(x) * cuCimagf(y)) +
|
| 128 |
+
(cuCimagf(x) * cuCrealf(y)));
|
| 129 |
+
return prod;
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
/* This implementation guards against intermediate underflow and overflow
|
| 133 |
+
* by scaling. Such guarded implementations are usually the default for
|
| 134 |
+
* complex library implementations, with some also offering an unguarded,
|
| 135 |
+
* faster version.
|
| 136 |
+
*/
|
| 137 |
+
__host__ __device__ static __inline__ cuFloatComplex cuCdivf (cuFloatComplex x,
|
| 138 |
+
cuFloatComplex y)
|
| 139 |
+
{
|
| 140 |
+
cuFloatComplex quot;
|
| 141 |
+
float s = fabsf(cuCrealf(y)) + fabsf(cuCimagf(y));
|
| 142 |
+
float oos = 1.0f / s;
|
| 143 |
+
float ars = cuCrealf(x) * oos;
|
| 144 |
+
float ais = cuCimagf(x) * oos;
|
| 145 |
+
float brs = cuCrealf(y) * oos;
|
| 146 |
+
float bis = cuCimagf(y) * oos;
|
| 147 |
+
s = (brs * brs) + (bis * bis);
|
| 148 |
+
oos = 1.0f / s;
|
| 149 |
+
quot = make_cuFloatComplex (((ars * brs) + (ais * bis)) * oos,
|
| 150 |
+
((ais * brs) - (ars * bis)) * oos);
|
| 151 |
+
return quot;
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
/*
|
| 155 |
+
* We would like to call hypotf(), but it's not available on all platforms.
|
| 156 |
+
* This discrete implementation guards against intermediate underflow and
|
| 157 |
+
* overflow by scaling. Otherwise we would lose half the exponent range.
|
| 158 |
+
* There are various ways of doing guarded computation. For now chose the
|
| 159 |
+
* simplest and fastest solution, however this may suffer from inaccuracies
|
| 160 |
+
* if sqrt and division are not IEEE compliant.
|
| 161 |
+
*/
|
| 162 |
+
__host__ __device__ static __inline__ float cuCabsf (cuFloatComplex x)
|
| 163 |
+
{
|
| 164 |
+
float a = cuCrealf(x);
|
| 165 |
+
float b = cuCimagf(x);
|
| 166 |
+
float v, w, t;
|
| 167 |
+
a = fabsf(a);
|
| 168 |
+
b = fabsf(b);
|
| 169 |
+
if (a > b) {
|
| 170 |
+
v = a;
|
| 171 |
+
w = b;
|
| 172 |
+
} else {
|
| 173 |
+
v = b;
|
| 174 |
+
w = a;
|
| 175 |
+
}
|
| 176 |
+
t = w / v;
|
| 177 |
+
t = 1.0f + t * t;
|
| 178 |
+
t = v * sqrtf(t);
|
| 179 |
+
if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) {
|
| 180 |
+
t = v + w;
|
| 181 |
+
}
|
| 182 |
+
return t;
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
/* Double precision */
|
| 186 |
+
typedef double2 cuDoubleComplex;
|
| 187 |
+
|
| 188 |
+
__host__ __device__ static __inline__ double cuCreal (cuDoubleComplex x)
|
| 189 |
+
{
|
| 190 |
+
return x.x;
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
__host__ __device__ static __inline__ double cuCimag (cuDoubleComplex x)
|
| 194 |
+
{
|
| 195 |
+
return x.y;
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
__host__ __device__ static __inline__ cuDoubleComplex make_cuDoubleComplex
|
| 199 |
+
(double r, double i)
|
| 200 |
+
{
|
| 201 |
+
cuDoubleComplex res;
|
| 202 |
+
res.x = r;
|
| 203 |
+
res.y = i;
|
| 204 |
+
return res;
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
__host__ __device__ static __inline__ cuDoubleComplex cuConj(cuDoubleComplex x)
|
| 208 |
+
{
|
| 209 |
+
return make_cuDoubleComplex (cuCreal(x), -cuCimag(x));
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
__host__ __device__ static __inline__ cuDoubleComplex cuCadd(cuDoubleComplex x,
|
| 213 |
+
cuDoubleComplex y)
|
| 214 |
+
{
|
| 215 |
+
return make_cuDoubleComplex (cuCreal(x) + cuCreal(y),
|
| 216 |
+
cuCimag(x) + cuCimag(y));
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
__host__ __device__ static __inline__ cuDoubleComplex cuCsub(cuDoubleComplex x,
|
| 220 |
+
cuDoubleComplex y)
|
| 221 |
+
{
|
| 222 |
+
return make_cuDoubleComplex (cuCreal(x) - cuCreal(y),
|
| 223 |
+
cuCimag(x) - cuCimag(y));
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
/* This implementation could suffer from intermediate overflow even though
|
| 227 |
+
* the final result would be in range. However, various implementations do
|
| 228 |
+
* not guard against this (presumably to avoid losing performance), so we
|
| 229 |
+
* don't do it either to stay competitive.
|
| 230 |
+
*/
|
| 231 |
+
__host__ __device__ static __inline__ cuDoubleComplex cuCmul(cuDoubleComplex x,
|
| 232 |
+
cuDoubleComplex y)
|
| 233 |
+
{
|
| 234 |
+
cuDoubleComplex prod;
|
| 235 |
+
prod = make_cuDoubleComplex ((cuCreal(x) * cuCreal(y)) -
|
| 236 |
+
(cuCimag(x) * cuCimag(y)),
|
| 237 |
+
(cuCreal(x) * cuCimag(y)) +
|
| 238 |
+
(cuCimag(x) * cuCreal(y)));
|
| 239 |
+
return prod;
|
| 240 |
+
}
|
| 241 |
+
|
| 242 |
+
/* This implementation guards against intermediate underflow and overflow
|
| 243 |
+
* by scaling. Such guarded implementations are usually the default for
|
| 244 |
+
* complex library implementations, with some also offering an unguarded,
|
| 245 |
+
* faster version.
|
| 246 |
+
*/
|
| 247 |
+
__host__ __device__ static __inline__ cuDoubleComplex cuCdiv(cuDoubleComplex x,
|
| 248 |
+
cuDoubleComplex y)
|
| 249 |
+
{
|
| 250 |
+
cuDoubleComplex quot;
|
| 251 |
+
double s = (fabs(cuCreal(y))) + (fabs(cuCimag(y)));
|
| 252 |
+
double oos = 1.0 / s;
|
| 253 |
+
double ars = cuCreal(x) * oos;
|
| 254 |
+
double ais = cuCimag(x) * oos;
|
| 255 |
+
double brs = cuCreal(y) * oos;
|
| 256 |
+
double bis = cuCimag(y) * oos;
|
| 257 |
+
s = (brs * brs) + (bis * bis);
|
| 258 |
+
oos = 1.0 / s;
|
| 259 |
+
quot = make_cuDoubleComplex (((ars * brs) + (ais * bis)) * oos,
|
| 260 |
+
((ais * brs) - (ars * bis)) * oos);
|
| 261 |
+
return quot;
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
/* This implementation guards against intermediate underflow and overflow
|
| 265 |
+
* by scaling. Otherwise we would lose half the exponent range. There are
|
| 266 |
+
* various ways of doing guarded computation. For now chose the simplest
|
| 267 |
+
* and fastest solution, however this may suffer from inaccuracies if sqrt
|
| 268 |
+
* and division are not IEEE compliant.
|
| 269 |
+
*/
|
| 270 |
+
__host__ __device__ static __inline__ double cuCabs (cuDoubleComplex x)
|
| 271 |
+
{
|
| 272 |
+
double a = cuCreal(x);
|
| 273 |
+
double b = cuCimag(x);
|
| 274 |
+
double v, w, t;
|
| 275 |
+
a = fabs(a);
|
| 276 |
+
b = fabs(b);
|
| 277 |
+
if (a > b) {
|
| 278 |
+
v = a;
|
| 279 |
+
w = b;
|
| 280 |
+
} else {
|
| 281 |
+
v = b;
|
| 282 |
+
w = a;
|
| 283 |
+
}
|
| 284 |
+
t = w / v;
|
| 285 |
+
t = 1.0 + t * t;
|
| 286 |
+
t = v * sqrt(t);
|
| 287 |
+
if ((v == 0.0) ||
|
| 288 |
+
(v > 1.79769313486231570e+308) || (w > 1.79769313486231570e+308)) {
|
| 289 |
+
t = v + w;
|
| 290 |
+
}
|
| 291 |
+
return t;
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
#if defined(__cplusplus)
|
| 295 |
+
}
|
| 296 |
+
#endif /* __cplusplus */
|
| 297 |
+
|
| 298 |
+
/* aliases */
|
| 299 |
+
typedef cuFloatComplex cuComplex;
|
| 300 |
+
__host__ __device__ static __inline__ cuComplex make_cuComplex (float x,
|
| 301 |
+
float y)
|
| 302 |
+
{
|
| 303 |
+
return make_cuFloatComplex (x, y);
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
/* float-to-double promotion */
|
| 307 |
+
__host__ __device__ static __inline__ cuDoubleComplex cuComplexFloatToDouble
|
| 308 |
+
(cuFloatComplex c)
|
| 309 |
+
{
|
| 310 |
+
return make_cuDoubleComplex ((double)cuCrealf(c), (double)cuCimagf(c));
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
__host__ __device__ static __inline__ cuFloatComplex cuComplexDoubleToFloat
|
| 314 |
+
(cuDoubleComplex c)
|
| 315 |
+
{
|
| 316 |
+
return make_cuFloatComplex ((float)cuCreal(c), (float)cuCimag(c));
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
__host__ __device__ static __inline__ cuComplex cuCfmaf( cuComplex x, cuComplex y, cuComplex d)
|
| 321 |
+
{
|
| 322 |
+
float real_res;
|
| 323 |
+
float imag_res;
|
| 324 |
+
|
| 325 |
+
real_res = (cuCrealf(x) * cuCrealf(y)) + cuCrealf(d);
|
| 326 |
+
imag_res = (cuCrealf(x) * cuCimagf(y)) + cuCimagf(d);
|
| 327 |
+
|
| 328 |
+
real_res = -(cuCimagf(x) * cuCimagf(y)) + real_res;
|
| 329 |
+
imag_res = (cuCimagf(x) * cuCrealf(y)) + imag_res;
|
| 330 |
+
|
| 331 |
+
return make_cuComplex(real_res, imag_res);
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
__host__ __device__ static __inline__ cuDoubleComplex cuCfma( cuDoubleComplex x, cuDoubleComplex y, cuDoubleComplex d)
|
| 335 |
+
{
|
| 336 |
+
double real_res;
|
| 337 |
+
double imag_res;
|
| 338 |
+
|
| 339 |
+
real_res = (cuCreal(x) * cuCreal(y)) + cuCreal(d);
|
| 340 |
+
imag_res = (cuCreal(x) * cuCimag(y)) + cuCimag(d);
|
| 341 |
+
|
| 342 |
+
real_res = -(cuCimag(x) * cuCimag(y)) + real_res;
|
| 343 |
+
imag_res = (cuCimag(x) * cuCreal(y)) + imag_res;
|
| 344 |
+
|
| 345 |
+
return make_cuDoubleComplex(real_res, imag_res);
|
| 346 |
+
}
|
| 347 |
+
|
| 348 |
+
#endif /* !defined(CU_COMPLEX_H_) */
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#ifndef CUDAEGLTYPEDEFS_H
|
| 51 |
+
#define CUDAEGLTYPEDEFS_H
|
| 52 |
+
|
| 53 |
+
#include <cudaEGL.h>
|
| 54 |
+
|
| 55 |
+
#ifdef __cplusplus
|
| 56 |
+
extern "C" {
|
| 57 |
+
#endif // __cplusplus
|
| 58 |
+
|
| 59 |
+
/*
|
| 60 |
+
* Macros for the latest version for each driver function in cudaEGL.h
|
| 61 |
+
*/
|
| 62 |
+
#define PFN_cuGraphicsEGLRegisterImage PFN_cuGraphicsEGLRegisterImage_v7000
|
| 63 |
+
#define PFN_cuEGLStreamConsumerConnect PFN_cuEGLStreamConsumerConnect_v7000
|
| 64 |
+
#define PFN_cuEGLStreamConsumerConnectWithFlags PFN_cuEGLStreamConsumerConnectWithFlags_v8000
|
| 65 |
+
#define PFN_cuEGLStreamConsumerDisconnect PFN_cuEGLStreamConsumerDisconnect_v7000
|
| 66 |
+
#define PFN_cuEGLStreamConsumerAcquireFrame PFN_cuEGLStreamConsumerAcquireFrame_v7000
|
| 67 |
+
#define PFN_cuEGLStreamConsumerReleaseFrame PFN_cuEGLStreamConsumerReleaseFrame_v7000
|
| 68 |
+
#define PFN_cuEGLStreamProducerConnect PFN_cuEGLStreamProducerConnect_v7000
|
| 69 |
+
#define PFN_cuEGLStreamProducerDisconnect PFN_cuEGLStreamProducerDisconnect_v7000
|
| 70 |
+
#define PFN_cuEGLStreamProducerPresentFrame PFN_cuEGLStreamProducerPresentFrame_v7000
|
| 71 |
+
#define PFN_cuEGLStreamProducerReturnFrame PFN_cuEGLStreamProducerReturnFrame_v7000
|
| 72 |
+
#define PFN_cuGraphicsResourceGetMappedEglFrame PFN_cuGraphicsResourceGetMappedEglFrame_v7000
|
| 73 |
+
#define PFN_cuEventCreateFromEGLSync PFN_cuEventCreateFromEGLSync_v9000
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
/**
|
| 77 |
+
* Type definitions for functions defined in cudaEGL.h
|
| 78 |
+
*/
|
| 79 |
+
typedef CUresult (CUDAAPI *PFN_cuGraphicsEGLRegisterImage_v7000)(CUgraphicsResource CUDAAPI *pCudaResource, EGLImageKHR image, unsigned int flags);
|
| 80 |
+
typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream);
|
| 81 |
+
typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnectWithFlags_v8000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, unsigned int flags);
|
| 82 |
+
typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn);
|
| 83 |
+
typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerAcquireFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource CUDAAPI *pCudaResource, CUstream CUDAAPI *pStream, unsigned int timeout);
|
| 84 |
+
typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerReleaseFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource pCudaResource, CUstream CUDAAPI *pStream);
|
| 85 |
+
typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, EGLint width, EGLint height);
|
| 86 |
+
typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn);
|
| 87 |
+
typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerPresentFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 eglframe, CUstream CUDAAPI *pStream);
|
| 88 |
+
typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerReturnFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 CUDAAPI *eglframe, CUstream CUDAAPI *pStream);
|
| 89 |
+
typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedEglFrame_v7000)(CUeglFrame_v1 CUDAAPI *eglFrame, CUgraphicsResource resource, unsigned int index, unsigned int mipLevel);
|
| 90 |
+
typedef CUresult (CUDAAPI *PFN_cuEventCreateFromEGLSync_v9000)(CUevent CUDAAPI *phEvent, EGLSyncKHR eglSync, unsigned int flags);
|
| 91 |
+
|
| 92 |
+
#ifdef __cplusplus
|
| 93 |
+
}
|
| 94 |
+
#endif // __cplusplus
|
| 95 |
+
|
| 96 |
+
#endif // file guard
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#ifndef CUDAPROFILERTYPEDEFS_H
|
| 51 |
+
#define CUDAPROFILERTYPEDEFS_H
|
| 52 |
+
|
| 53 |
+
#include <cudaProfiler.h>
|
| 54 |
+
|
| 55 |
+
#ifdef __cplusplus
|
| 56 |
+
extern "C" {
|
| 57 |
+
#endif // __cplusplus
|
| 58 |
+
|
| 59 |
+
/*
|
| 60 |
+
* Macros for the latest version for each driver function in cudaProfiler.h
|
| 61 |
+
*/
|
| 62 |
+
#define PFN_cuProfilerInitialize PFN_cuProfilerInitialize_v4000
|
| 63 |
+
#define PFN_cuProfilerStart PFN_cuProfilerStart_v4000
|
| 64 |
+
#define PFN_cuProfilerStop PFN_cuProfilerStop_v4000
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
/**
|
| 68 |
+
* Type definitions for functions defined in cudaProfiler.h
|
| 69 |
+
*/
|
| 70 |
+
typedef CUresult (CUDAAPI *PFN_cuProfilerInitialize_v4000)(const char *configFile, const char *outputFile, CUoutput_mode outputMode);
|
| 71 |
+
typedef CUresult (CUDAAPI *PFN_cuProfilerStart_v4000)(void);
|
| 72 |
+
typedef CUresult (CUDAAPI *PFN_cuProfilerStop_v4000)(void);
|
| 73 |
+
|
| 74 |
+
#ifdef __cplusplus
|
| 75 |
+
}
|
| 76 |
+
#endif // __cplusplus
|
| 77 |
+
|
| 78 |
+
#endif // file guard
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h
ADDED
|
@@ -0,0 +1,282 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#ifndef CUDAVDPAU_H
|
| 51 |
+
#define CUDAVDPAU_H
|
| 52 |
+
|
| 53 |
+
#ifdef CUDA_FORCE_API_VERSION
|
| 54 |
+
#error "CUDA_FORCE_API_VERSION is no longer supported."
|
| 55 |
+
#endif
|
| 56 |
+
|
| 57 |
+
#define cuVDPAUCtxCreate cuVDPAUCtxCreate_v2
|
| 58 |
+
|
| 59 |
+
#ifdef __cplusplus
|
| 60 |
+
extern "C" {
|
| 61 |
+
#endif
|
| 62 |
+
|
| 63 |
+
/**
|
| 64 |
+
* \defgroup CUDA_VDPAU VDPAU Interoperability
|
| 65 |
+
* \ingroup CUDA_DRIVER
|
| 66 |
+
*
|
| 67 |
+
* ___MANBRIEF___ VDPAU interoperability functions of the low-level CUDA driver
|
| 68 |
+
* API (___CURRENT_FILE___) ___ENDMANBRIEF___
|
| 69 |
+
*
|
| 70 |
+
* This section describes the VDPAU interoperability functions of the
|
| 71 |
+
* low-level CUDA driver application programming interface.
|
| 72 |
+
*
|
| 73 |
+
* @{
|
| 74 |
+
*/
|
| 75 |
+
|
| 76 |
+
/**
|
| 77 |
+
* \brief Gets the CUDA device associated with a VDPAU device
|
| 78 |
+
*
|
| 79 |
+
* Returns in \p *pDevice the CUDA device associated with a \p vdpDevice, if
|
| 80 |
+
* applicable.
|
| 81 |
+
*
|
| 82 |
+
* \param pDevice - Device associated with vdpDevice
|
| 83 |
+
* \param vdpDevice - A VdpDevice handle
|
| 84 |
+
* \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
|
| 85 |
+
*
|
| 86 |
+
* \return
|
| 87 |
+
* ::CUDA_SUCCESS,
|
| 88 |
+
* ::CUDA_ERROR_DEINITIALIZED,
|
| 89 |
+
* ::CUDA_ERROR_NOT_INITIALIZED,
|
| 90 |
+
* ::CUDA_ERROR_INVALID_CONTEXT,
|
| 91 |
+
* ::CUDA_ERROR_INVALID_VALUE
|
| 92 |
+
* \notefnerr
|
| 93 |
+
*
|
| 94 |
+
* \sa ::cuCtxCreate, ::cuVDPAUCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface,
|
| 95 |
+
* ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
|
| 96 |
+
* ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
|
| 97 |
+
* ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
|
| 98 |
+
* ::cudaVDPAUGetDevice
|
| 99 |
+
*/
|
| 100 |
+
CUresult CUDAAPI cuVDPAUGetDevice(CUdevice *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
|
| 101 |
+
|
| 102 |
+
/**
|
| 103 |
+
* \brief Create a CUDA context for interoperability with VDPAU
|
| 104 |
+
*
|
| 105 |
+
* Creates a new CUDA context, initializes VDPAU interoperability, and
|
| 106 |
+
* associates the CUDA context with the calling thread. It must be called
|
| 107 |
+
* before performing any other VDPAU interoperability operations. It may fail
|
| 108 |
+
* if the needed VDPAU driver facilities are not available. For usage of the
|
| 109 |
+
* \p flags parameter, see ::cuCtxCreate().
|
| 110 |
+
*
|
| 111 |
+
* \param pCtx - Returned CUDA context
|
| 112 |
+
* \param flags - Options for CUDA context creation
|
| 113 |
+
* \param device - Device on which to create the context
|
| 114 |
+
* \param vdpDevice - The VdpDevice to interop with
|
| 115 |
+
* \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
|
| 116 |
+
*
|
| 117 |
+
* \return
|
| 118 |
+
* ::CUDA_SUCCESS,
|
| 119 |
+
* ::CUDA_ERROR_DEINITIALIZED,
|
| 120 |
+
* ::CUDA_ERROR_NOT_INITIALIZED,
|
| 121 |
+
* ::CUDA_ERROR_INVALID_CONTEXT,
|
| 122 |
+
* ::CUDA_ERROR_INVALID_VALUE,
|
| 123 |
+
* ::CUDA_ERROR_OUT_OF_MEMORY
|
| 124 |
+
* \notefnerr
|
| 125 |
+
*
|
| 126 |
+
* \sa ::cuCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface,
|
| 127 |
+
* ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
|
| 128 |
+
* ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
|
| 129 |
+
* ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
|
| 130 |
+
* ::cuVDPAUGetDevice
|
| 131 |
+
*/
|
| 132 |
+
CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
|
| 133 |
+
|
| 134 |
+
/**
|
| 135 |
+
* \brief Registers a VDPAU VdpVideoSurface object
|
| 136 |
+
*
|
| 137 |
+
* Registers the VdpVideoSurface specified by \p vdpSurface for access by
|
| 138 |
+
* CUDA. A handle to the registered object is returned as \p pCudaResource.
|
| 139 |
+
* The surface's intended usage is specified using \p flags, as follows:
|
| 140 |
+
*
|
| 141 |
+
* - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
|
| 142 |
+
* resource will be used. It is therefore assumed that this resource will be
|
| 143 |
+
* read from and written to by CUDA. This is the default value.
|
| 144 |
+
* - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
|
| 145 |
+
* will not write to this resource.
|
| 146 |
+
* - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
|
| 147 |
+
* CUDA will not read from this resource and will write over the
|
| 148 |
+
* entire contents of the resource, so none of the data previously
|
| 149 |
+
* stored in the resource will be preserved.
|
| 150 |
+
*
|
| 151 |
+
* The VdpVideoSurface is presented as an array of subresources that may be
|
| 152 |
+
* accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray.
|
| 153 |
+
* The exact number of valid \p arrayIndex values depends on the VDPAU surface
|
| 154 |
+
* format. The mapping is shown in the table below. \p mipLevel must be 0.
|
| 155 |
+
*
|
| 156 |
+
* \htmlonly
|
| 157 |
+
* <table>
|
| 158 |
+
* <tr><th>VdpChromaType </th><th>arrayIndex</th><th>Size </th><th>Format</th><th>Content </th></tr>
|
| 159 |
+
* <tr><td rowspan="4" valign="top">VDP_CHROMA_TYPE_420</td><td>0 </td><td>w x h/2</td><td>R8 </td><td>Top-field luma </td></tr>
|
| 160 |
+
* <tr> <td>1 </td><td>w x h/2</td><td>R8 </td><td>Bottom-field luma </td></tr>
|
| 161 |
+
* <tr> <td>2 </td><td>w/2 x h/4</td><td>R8G8 </td><td>Top-field chroma </td></tr>
|
| 162 |
+
* <tr> <td>3 </td><td>w/2 x h/4</td><td>R8G8 </td><td>Bottom-field chroma</td></tr>
|
| 163 |
+
* <tr><td rowspan="4" valign="top">VDP_CHROMA_TYPE_422</td><td>0 </td><td>w x h/2</td><td>R8 </td><td>Top-field luma </td></tr>
|
| 164 |
+
* <tr> <td>1 </td><td>w x h/2</td><td>R8 </td><td>Bottom-field luma </td></tr>
|
| 165 |
+
* <tr> <td>2 </td><td>w/2 x h/2</td><td>R8G8 </td><td>Top-field chroma </td></tr>
|
| 166 |
+
* <tr> <td>3 </td><td>w/2 x h/2</td><td>R8G8 </td><td>Bottom-field chroma</td></tr>
|
| 167 |
+
* </table>
|
| 168 |
+
* \endhtmlonly
|
| 169 |
+
*
|
| 170 |
+
* \latexonly
|
| 171 |
+
* \begin{tabular}{|l|l|l|l|l|}
|
| 172 |
+
* \hline
|
| 173 |
+
* VdpChromaType & arrayIndex & Size & Format & Content \\
|
| 174 |
+
* \hline
|
| 175 |
+
* VDP\_CHROMA\_TYPE\_420 & 0 & w x h/2 & R8 & Top-field luma \\
|
| 176 |
+
* & 1 & w x h/2 & R8 & Bottom-field luma \\
|
| 177 |
+
* & 2 & w/2 x h/4 & R8G8 & Top-field chroma \\
|
| 178 |
+
* & 3 & w/2 x h/4 & R8G8 & Bottom-field chroma \\
|
| 179 |
+
* \hline
|
| 180 |
+
* VDP\_CHROMA\_TYPE\_422 & 0 & w x h/2 & R8 & Top-field luma \\
|
| 181 |
+
* & 1 & w x h/2 & R8 & Bottom-field luma \\
|
| 182 |
+
* & 2 & w/2 x h/2 & R8G8 & Top-field chroma \\
|
| 183 |
+
* & 3 & w/2 x h/2 & R8G8 & Bottom-field chroma \\
|
| 184 |
+
* \hline
|
| 185 |
+
* \end{tabular}
|
| 186 |
+
* \endlatexonly
|
| 187 |
+
*
|
| 188 |
+
* \param pCudaResource - Pointer to the returned object handle
|
| 189 |
+
* \param vdpSurface - The VdpVideoSurface to be registered
|
| 190 |
+
* \param flags - Map flags
|
| 191 |
+
*
|
| 192 |
+
* \return
|
| 193 |
+
* ::CUDA_SUCCESS,
|
| 194 |
+
* ::CUDA_ERROR_INVALID_HANDLE,
|
| 195 |
+
* ::CUDA_ERROR_ALREADY_MAPPED,
|
| 196 |
+
* ::CUDA_ERROR_INVALID_CONTEXT,
|
| 197 |
+
* \notefnerr
|
| 198 |
+
*
|
| 199 |
+
* \sa ::cuCtxCreate, ::cuVDPAUCtxCreate,
|
| 200 |
+
* ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
|
| 201 |
+
* ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
|
| 202 |
+
* ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
|
| 203 |
+
* ::cuVDPAUGetDevice,
|
| 204 |
+
* ::cudaGraphicsVDPAURegisterVideoSurface
|
| 205 |
+
*/
|
| 206 |
+
CUresult CUDAAPI cuGraphicsVDPAURegisterVideoSurface(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags);
|
| 207 |
+
|
| 208 |
+
/**
|
| 209 |
+
* \brief Registers a VDPAU VdpOutputSurface object
|
| 210 |
+
*
|
| 211 |
+
* Registers the VdpOutputSurface specified by \p vdpSurface for access by
|
| 212 |
+
* CUDA. A handle to the registered object is returned as \p pCudaResource.
|
| 213 |
+
* The surface's intended usage is specified using \p flags, as follows:
|
| 214 |
+
*
|
| 215 |
+
* - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
|
| 216 |
+
* resource will be used. It is therefore assumed that this resource will be
|
| 217 |
+
* read from and written to by CUDA. This is the default value.
|
| 218 |
+
* - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
|
| 219 |
+
* will not write to this resource.
|
| 220 |
+
* - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
|
| 221 |
+
* CUDA will not read from this resource and will write over the
|
| 222 |
+
* entire contents of the resource, so none of the data previously
|
| 223 |
+
* stored in the resource will be preserved.
|
| 224 |
+
*
|
| 225 |
+
* The VdpOutputSurface is presented as an array of subresources that may be
|
| 226 |
+
* accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray.
|
| 227 |
+
* The exact number of valid \p arrayIndex values depends on the VDPAU surface
|
| 228 |
+
* format. The mapping is shown in the table below. \p mipLevel must be 0.
|
| 229 |
+
*
|
| 230 |
+
* \htmlonly
|
| 231 |
+
* <table>
|
| 232 |
+
* <tr><th>VdpRGBAFormat </th><th>arrayIndex</th><th>Size </th><th>Format </th><th>Content </th></tr>
|
| 233 |
+
* <tr><td>VDP_RGBA_FORMAT_B8G8R8A8 </td><td>0 </td><td>w x h</td><td>ARGB8 </td><td>Entire surface</td></tr>
|
| 234 |
+
* <tr><td>VDP_RGBA_FORMAT_R10G10B10A2</td><td>0 </td><td>w x h</td><td>A2BGR10</td><td>Entire surface</td></tr>
|
| 235 |
+
* </table>
|
| 236 |
+
* \endhtmlonly
|
| 237 |
+
*
|
| 238 |
+
* \latexonly
|
| 239 |
+
* \begin{tabular}{|l|l|l|l|l|}
|
| 240 |
+
* \hline
|
| 241 |
+
* VdpRGBAFormat & arrayIndex & Size & Format & Content \\
|
| 242 |
+
* \hline
|
| 243 |
+
* VDP\_RGBA\_FORMAT\_B8G8R8A8 & 0 & w x h & ARGB8 & Entire surface \\
|
| 244 |
+
* VDP\_RGBA\_FORMAT\_R10G10B10A2 & 0 & w x h & A2BGR10 & Entire surface \\
|
| 245 |
+
* \hline
|
| 246 |
+
* \end{tabular}
|
| 247 |
+
* \endlatexonly
|
| 248 |
+
*
|
| 249 |
+
* \param pCudaResource - Pointer to the returned object handle
|
| 250 |
+
* \param vdpSurface - The VdpOutputSurface to be registered
|
| 251 |
+
* \param flags - Map flags
|
| 252 |
+
*
|
| 253 |
+
* \return
|
| 254 |
+
* ::CUDA_SUCCESS,
|
| 255 |
+
* ::CUDA_ERROR_INVALID_HANDLE,
|
| 256 |
+
* ::CUDA_ERROR_ALREADY_MAPPED,
|
| 257 |
+
* ::CUDA_ERROR_INVALID_CONTEXT,
|
| 258 |
+
* \notefnerr
|
| 259 |
+
*
|
| 260 |
+
* \sa ::cuCtxCreate, ::cuVDPAUCtxCreate,
|
| 261 |
+
* ::cuGraphicsVDPAURegisterVideoSurface, ::cuGraphicsUnregisterResource,
|
| 262 |
+
* ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
|
| 263 |
+
* ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
|
| 264 |
+
* ::cuVDPAUGetDevice,
|
| 265 |
+
* ::cudaGraphicsVDPAURegisterOutputSurface
|
| 266 |
+
*/
|
| 267 |
+
CUresult CUDAAPI cuGraphicsVDPAURegisterOutputSurface(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags);
|
| 268 |
+
|
| 269 |
+
/** @} */ /* END CUDA_VDPAU */
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
#if defined(__CUDA_API_VERSION_INTERNAL)
|
| 273 |
+
#undef cuVDPAUCtxCreate
|
| 274 |
+
|
| 275 |
+
CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
|
| 276 |
+
#endif /* __CUDA_API_VERSION_INTERNAL */
|
| 277 |
+
|
| 278 |
+
#ifdef __cplusplus
|
| 279 |
+
};
|
| 280 |
+
#endif
|
| 281 |
+
|
| 282 |
+
#endif
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#ifndef CUDAVDPAUTYPEDEFS_H
|
| 51 |
+
#define CUDAVDPAUTYPEDEFS_H
|
| 52 |
+
|
| 53 |
+
// Dependent includes for cudavdpau.h
|
| 54 |
+
#include <vdpau/vdpau.h>
|
| 55 |
+
|
| 56 |
+
#include <cudaVDPAU.h>
|
| 57 |
+
|
| 58 |
+
#ifdef __cplusplus
|
| 59 |
+
extern "C" {
|
| 60 |
+
#endif // __cplusplus
|
| 61 |
+
|
| 62 |
+
/*
|
| 63 |
+
* Macros for the latest version for each driver function in cudaVDPAU.h
|
| 64 |
+
*/
|
| 65 |
+
#define PFN_cuVDPAUGetDevice PFN_cuVDPAUGetDevice_v3010
|
| 66 |
+
#define PFN_cuVDPAUCtxCreate PFN_cuVDPAUCtxCreate_v3020
|
| 67 |
+
#define PFN_cuGraphicsVDPAURegisterVideoSurface PFN_cuGraphicsVDPAURegisterVideoSurface_v3010
|
| 68 |
+
#define PFN_cuGraphicsVDPAURegisterOutputSurface PFN_cuGraphicsVDPAURegisterOutputSurface_v3010
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
/**
|
| 72 |
+
* Type definitions for functions defined in cudaVDPAU.h
|
| 73 |
+
*/
|
| 74 |
+
typedef CUresult (CUDAAPI *PFN_cuVDPAUGetDevice_v3010)(CUdevice_v1 *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
|
| 75 |
+
typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3020)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
|
| 76 |
+
typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterVideoSurface_v3010)(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags);
|
| 77 |
+
typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterOutputSurface_v3010)(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags);
|
| 78 |
+
|
| 79 |
+
/*
|
| 80 |
+
* Type definitions for older versioned functions in cudaVDPAU.h
|
| 81 |
+
*/
|
| 82 |
+
#if defined(__CUDA_API_VERSION_INTERNAL)
|
| 83 |
+
typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3010)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
|
| 84 |
+
#endif
|
| 85 |
+
|
| 86 |
+
#ifdef __cplusplus
|
| 87 |
+
}
|
| 88 |
+
#endif // __cplusplus
|
| 89 |
+
|
| 90 |
+
#endif // file guard
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#ifndef _CUDA_AWBARRIER_PRIMITIVES_H_
|
| 51 |
+
#define _CUDA_AWBARRIER_PRIMITIVES_H_
|
| 52 |
+
|
| 53 |
+
#include "cuda_awbarrier_helpers.h"
|
| 54 |
+
|
| 55 |
+
#if !defined(_CUDA_AWBARRIER_SM_TARGET)
|
| 56 |
+
# error This file requires compute capability 7.0 or greater.
|
| 57 |
+
#endif
|
| 58 |
+
|
| 59 |
+
_CUDA_AWBARRIER_STATIC_QUALIFIER __host__
|
| 60 |
+
uint32_t __mbarrier_maximum_count() {
|
| 61 |
+
return _CUDA_AWBARRIER_MAX_COUNT;
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
_CUDA_AWBARRIER_STATIC_QUALIFIER
|
| 65 |
+
void __mbarrier_init(__mbarrier_t* barrier, uint32_t expected_count) {
|
| 66 |
+
_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(barrier, expected_count);
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
_CUDA_AWBARRIER_STATIC_QUALIFIER
|
| 70 |
+
void __mbarrier_inval(__mbarrier_t* barrier) {
|
| 71 |
+
_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(barrier);
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
_CUDA_AWBARRIER_STATIC_QUALIFIER
|
| 75 |
+
__mbarrier_token_t __mbarrier_arrive(__mbarrier_t* barrier) {
|
| 76 |
+
return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<false>(barrier);
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
_CUDA_AWBARRIER_STATIC_QUALIFIER
|
| 80 |
+
__mbarrier_token_t __mbarrier_arrive_and_drop(__mbarrier_t* barrier) {
|
| 81 |
+
return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<true>(barrier);
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
_CUDA_AWBARRIER_STATIC_QUALIFIER
|
| 85 |
+
bool __mbarrier_test_wait(__mbarrier_t* barrier, __mbarrier_token_t token) {
|
| 86 |
+
return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(barrier, token);
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
_CUDA_AWBARRIER_STATIC_QUALIFIER
|
| 90 |
+
uint32_t __mbarrier_token_pending_count(__mbarrier_token_t token) {
|
| 91 |
+
return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(token);
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
_CUDA_AWBARRIER_STATIC_QUALIFIER
|
| 95 |
+
bool __mbarrier_test_wait_parity(__mbarrier_t* barrier, bool phase_parity) {
|
| 96 |
+
return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(barrier, phase_parity);
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
_CUDA_AWBARRIER_STATIC_QUALIFIER
|
| 100 |
+
bool __mbarrier_try_wait(__mbarrier_t* barrier, __mbarrier_token_t token, uint32_t max_sleep_nanosec) {
|
| 101 |
+
return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait(barrier, token, max_sleep_nanosec);
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
_CUDA_AWBARRIER_STATIC_QUALIFIER
|
| 105 |
+
bool __mbarrier_try_wait_parity(__mbarrier_t* barrier, bool phase_parity, uint32_t max_sleep_nanosec) {
|
| 106 |
+
return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait_parity(barrier, phase_parity, max_sleep_nanosec);
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
#endif /* !_CUDA_AWBARRIER_PRIMITIVES_H_ */
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h
ADDED
|
@@ -0,0 +1,642 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_EGL_INTEROP_H__)
|
| 51 |
+
#define __CUDA_EGL_INTEROP_H__
|
| 52 |
+
|
| 53 |
+
#include "cuda_runtime_api.h"
|
| 54 |
+
#include "cuda_runtime.h"
|
| 55 |
+
#include "cudart_platform.h"
|
| 56 |
+
#include "EGL/egl.h"
|
| 57 |
+
#include "EGL/eglext.h"
|
| 58 |
+
|
| 59 |
+
#if defined(__cplusplus)
|
| 60 |
+
extern "C" {
|
| 61 |
+
#endif /* __cplusplus */
|
| 62 |
+
|
| 63 |
+
/**
|
| 64 |
+
* \addtogroup CUDART_TYPES
|
| 65 |
+
* @{
|
| 66 |
+
*/
|
| 67 |
+
|
| 68 |
+
/**
|
| 69 |
+
* Maximum number of planes per frame
|
| 70 |
+
*/
|
| 71 |
+
#define CUDA_EGL_MAX_PLANES 3
|
| 72 |
+
|
| 73 |
+
/**
|
| 74 |
+
* CUDA EglFrame type - array or pointer
|
| 75 |
+
*/
|
| 76 |
+
typedef enum cudaEglFrameType_enum
|
| 77 |
+
{
|
| 78 |
+
cudaEglFrameTypeArray = 0, /**< Frame type CUDA array */
|
| 79 |
+
cudaEglFrameTypePitch = 1, /**< Frame type CUDA pointer */
|
| 80 |
+
} cudaEglFrameType;
|
| 81 |
+
|
| 82 |
+
/**
|
| 83 |
+
* Resource location flags- sysmem or vidmem
|
| 84 |
+
*
|
| 85 |
+
* For CUDA context on iGPU, since video and system memory are equivalent -
|
| 86 |
+
* these flags will not have an effect on the execution.
|
| 87 |
+
*
|
| 88 |
+
* For CUDA context on dGPU, applications can use the flag ::cudaEglResourceLocationFlags
|
| 89 |
+
* to give a hint about the desired location.
|
| 90 |
+
*
|
| 91 |
+
* ::cudaEglResourceLocationSysmem - the frame data is made resident on the system memory
|
| 92 |
+
* to be accessed by CUDA.
|
| 93 |
+
*
|
| 94 |
+
* ::cudaEglResourceLocationVidmem - the frame data is made resident on the dedicated
|
| 95 |
+
* video memory to be accessed by CUDA.
|
| 96 |
+
*
|
| 97 |
+
* There may be an additional latency due to new allocation and data migration,
|
| 98 |
+
* if the frame is produced on a different memory.
|
| 99 |
+
*/
|
| 100 |
+
typedef enum cudaEglResourceLocationFlags_enum {
|
| 101 |
+
cudaEglResourceLocationSysmem = 0x00, /**< Resource location sysmem */
|
| 102 |
+
cudaEglResourceLocationVidmem = 0x01, /**< Resource location vidmem */
|
| 103 |
+
} cudaEglResourceLocationFlags;
|
| 104 |
+
|
| 105 |
+
/**
|
| 106 |
+
* CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops.
|
| 107 |
+
*/
|
| 108 |
+
typedef enum cudaEglColorFormat_enum {
|
| 109 |
+
cudaEglColorFormatYUV420Planar = 0, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 110 |
+
cudaEglColorFormatYUV420SemiPlanar = 1, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. */
|
| 111 |
+
cudaEglColorFormatYUV422Planar = 2, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
|
| 112 |
+
cudaEglColorFormatYUV422SemiPlanar = 3, /**< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. */
|
| 113 |
+
cudaEglColorFormatARGB = 6, /**< R/G/B/A four channels in one surface with BGRA byte ordering. */
|
| 114 |
+
cudaEglColorFormatRGBA = 7, /**< R/G/B/A four channels in one surface with ABGR byte ordering. */
|
| 115 |
+
cudaEglColorFormatL = 8, /**< single luminance channel in one surface. */
|
| 116 |
+
cudaEglColorFormatR = 9, /**< single color channel in one surface. */
|
| 117 |
+
cudaEglColorFormatYUV444Planar = 10, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
|
| 118 |
+
cudaEglColorFormatYUV444SemiPlanar = 11, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. */
|
| 119 |
+
cudaEglColorFormatYUYV422 = 12, /**< Y, U, V in one surface, interleaved as UYVY in one channel. */
|
| 120 |
+
cudaEglColorFormatUYVY422 = 13, /**< Y, U, V in one surface, interleaved as YUYV in one channel. */
|
| 121 |
+
cudaEglColorFormatABGR = 14, /**< R/G/B/A four channels in one surface with RGBA byte ordering. */
|
| 122 |
+
cudaEglColorFormatBGRA = 15, /**< R/G/B/A four channels in one surface with ARGB byte ordering. */
|
| 123 |
+
cudaEglColorFormatA = 16, /**< Alpha color format - one channel in one surface. */
|
| 124 |
+
cudaEglColorFormatRG = 17, /**< R/G color format - two channels in one surface with GR byte ordering */
|
| 125 |
+
cudaEglColorFormatAYUV = 18, /**< Y, U, V, A four channels in one surface, interleaved as VUYA. */
|
| 126 |
+
cudaEglColorFormatYVU444SemiPlanar = 19, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
|
| 127 |
+
cudaEglColorFormatYVU422SemiPlanar = 20, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
|
| 128 |
+
cudaEglColorFormatYVU420SemiPlanar = 21, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 129 |
+
cudaEglColorFormatY10V10U10_444SemiPlanar = 22, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
|
| 130 |
+
cudaEglColorFormatY10V10U10_420SemiPlanar = 23, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 131 |
+
cudaEglColorFormatY12V12U12_444SemiPlanar = 24, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
|
| 132 |
+
cudaEglColorFormatY12V12U12_420SemiPlanar = 25, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 133 |
+
cudaEglColorFormatVYUY_ER = 26, /**< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. */
|
| 134 |
+
cudaEglColorFormatUYVY_ER = 27, /**< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. */
|
| 135 |
+
cudaEglColorFormatYUYV_ER = 28, /**< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. */
|
| 136 |
+
cudaEglColorFormatYVYU_ER = 29, /**< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. */
|
| 137 |
+
cudaEglColorFormatYUVA_ER = 31, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. */
|
| 138 |
+
cudaEglColorFormatAYUV_ER = 32, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. */
|
| 139 |
+
cudaEglColorFormatYUV444Planar_ER = 33, /**< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. */
|
| 140 |
+
cudaEglColorFormatYUV422Planar_ER = 34, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
|
| 141 |
+
cudaEglColorFormatYUV420Planar_ER = 35, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 142 |
+
cudaEglColorFormatYUV444SemiPlanar_ER = 36, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. */
|
| 143 |
+
cudaEglColorFormatYUV422SemiPlanar_ER = 37, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
|
| 144 |
+
cudaEglColorFormatYUV420SemiPlanar_ER = 38, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 145 |
+
cudaEglColorFormatYVU444Planar_ER = 39, /**< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. */
|
| 146 |
+
cudaEglColorFormatYVU422Planar_ER = 40, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
|
| 147 |
+
cudaEglColorFormatYVU420Planar_ER = 41, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 148 |
+
cudaEglColorFormatYVU444SemiPlanar_ER = 42, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
|
| 149 |
+
cudaEglColorFormatYVU422SemiPlanar_ER = 43, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
|
| 150 |
+
cudaEglColorFormatYVU420SemiPlanar_ER = 44, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 151 |
+
cudaEglColorFormatBayerRGGB = 45, /**< Bayer format - one channel in one surface with interleaved RGGB ordering. */
|
| 152 |
+
cudaEglColorFormatBayerBGGR = 46, /**< Bayer format - one channel in one surface with interleaved BGGR ordering. */
|
| 153 |
+
cudaEglColorFormatBayerGRBG = 47, /**< Bayer format - one channel in one surface with interleaved GRBG ordering. */
|
| 154 |
+
cudaEglColorFormatBayerGBRG = 48, /**< Bayer format - one channel in one surface with interleaved GBRG ordering. */
|
| 155 |
+
cudaEglColorFormatBayer10RGGB = 49, /**< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
|
| 156 |
+
cudaEglColorFormatBayer10BGGR = 50, /**< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
|
| 157 |
+
cudaEglColorFormatBayer10GRBG = 51, /**< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
|
| 158 |
+
cudaEglColorFormatBayer10GBRG = 52, /**< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
|
| 159 |
+
cudaEglColorFormatBayer12RGGB = 53, /**< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
|
| 160 |
+
cudaEglColorFormatBayer12BGGR = 54, /**< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
|
| 161 |
+
cudaEglColorFormatBayer12GRBG = 55, /**< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
|
| 162 |
+
cudaEglColorFormatBayer12GBRG = 56, /**< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
|
| 163 |
+
cudaEglColorFormatBayer14RGGB = 57, /**< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
|
| 164 |
+
cudaEglColorFormatBayer14BGGR = 58, /**< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
|
| 165 |
+
cudaEglColorFormatBayer14GRBG = 59, /**< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
|
| 166 |
+
cudaEglColorFormatBayer14GBRG = 60, /**< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
|
| 167 |
+
cudaEglColorFormatBayer20RGGB = 61, /**< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
|
| 168 |
+
cudaEglColorFormatBayer20BGGR = 62, /**< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
|
| 169 |
+
cudaEglColorFormatBayer20GRBG = 63, /**< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
|
| 170 |
+
cudaEglColorFormatBayer20GBRG = 64, /**< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
|
| 171 |
+
cudaEglColorFormatYVU444Planar = 65, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
|
| 172 |
+
cudaEglColorFormatYVU422Planar = 66, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
|
| 173 |
+
cudaEglColorFormatYVU420Planar = 67, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 174 |
+
cudaEglColorFormatBayerIspRGGB = 68, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. */
|
| 175 |
+
cudaEglColorFormatBayerIspBGGR = 69, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. */
|
| 176 |
+
cudaEglColorFormatBayerIspGRBG = 70, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. */
|
| 177 |
+
cudaEglColorFormatBayerIspGBRG = 71, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. */
|
| 178 |
+
cudaEglColorFormatBayerBCCR = 72, /**< Bayer format - one channel in one surface with interleaved BCCR ordering. */
|
| 179 |
+
cudaEglColorFormatBayerRCCB = 73, /**< Bayer format - one channel in one surface with interleaved RCCB ordering. */
|
| 180 |
+
cudaEglColorFormatBayerCRBC = 74, /**< Bayer format - one channel in one surface with interleaved CRBC ordering. */
|
| 181 |
+
cudaEglColorFormatBayerCBRC = 75, /**< Bayer format - one channel in one surface with interleaved CBRC ordering. */
|
| 182 |
+
cudaEglColorFormatBayer10CCCC = 76, /**< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
|
| 183 |
+
cudaEglColorFormatBayer12BCCR = 77, /**< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
|
| 184 |
+
cudaEglColorFormatBayer12RCCB = 78, /**< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
|
| 185 |
+
cudaEglColorFormatBayer12CRBC = 79, /**< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
|
| 186 |
+
cudaEglColorFormatBayer12CBRC = 80, /**< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
|
| 187 |
+
cudaEglColorFormatBayer12CCCC = 81, /**< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
|
| 188 |
+
cudaEglColorFormatY = 82, /**< Color format for single Y plane. */
|
| 189 |
+
cudaEglColorFormatYUV420SemiPlanar_2020 = 83, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 190 |
+
cudaEglColorFormatYVU420SemiPlanar_2020 = 84, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 191 |
+
cudaEglColorFormatYUV420Planar_2020 = 85, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 192 |
+
cudaEglColorFormatYVU420Planar_2020 = 86, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 193 |
+
cudaEglColorFormatYUV420SemiPlanar_709 = 87, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 194 |
+
cudaEglColorFormatYVU420SemiPlanar_709 = 88, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 195 |
+
cudaEglColorFormatYUV420Planar_709 = 89, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 196 |
+
cudaEglColorFormatYVU420Planar_709 = 90, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 197 |
+
cudaEglColorFormatY10V10U10_420SemiPlanar_709 = 91, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 198 |
+
cudaEglColorFormatY10V10U10_420SemiPlanar_2020 = 92, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 199 |
+
cudaEglColorFormatY10V10U10_422SemiPlanar_2020 = 93, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
|
| 200 |
+
cudaEglColorFormatY10V10U10_422SemiPlanar = 94, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
|
| 201 |
+
cudaEglColorFormatY10V10U10_422SemiPlanar_709 = 95, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
|
| 202 |
+
cudaEglColorFormatY_ER = 96, /**< Extended Range Color format for single Y plane. */
|
| 203 |
+
cudaEglColorFormatY_709_ER = 97, /**< Extended Range Color format for single Y plane. */
|
| 204 |
+
cudaEglColorFormatY10_ER = 98, /**< Extended Range Color format for single Y10 plane. */
|
| 205 |
+
cudaEglColorFormatY10_709_ER = 99, /**< Extended Range Color format for single Y10 plane. */
|
| 206 |
+
cudaEglColorFormatY12_ER = 100, /**< Extended Range Color format for single Y12 plane. */
|
| 207 |
+
cudaEglColorFormatY12_709_ER = 101, /**< Extended Range Color format for single Y12 plane. */
|
| 208 |
+
cudaEglColorFormatYUVA = 102, /**< Y, U, V, A four channels in one surface, interleaved as AVUY. */
|
| 209 |
+
cudaEglColorFormatYVYU = 104, /**< Y, U, V in one surface, interleaved as YVYU in one channel. */
|
| 210 |
+
cudaEglColorFormatVYUY = 105, /**< Y, U, V in one surface, interleaved as VYUY in one channel. */
|
| 211 |
+
cudaEglColorFormatY10V10U10_420SemiPlanar_ER = 106, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 212 |
+
cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER = 107, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 213 |
+
cudaEglColorFormatY10V10U10_444SemiPlanar_ER = 108, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
|
| 214 |
+
cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER = 109, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
|
| 215 |
+
cudaEglColorFormatY12V12U12_420SemiPlanar_ER = 110, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 216 |
+
cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER = 111, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
|
| 217 |
+
cudaEglColorFormatY12V12U12_444SemiPlanar_ER = 112, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
|
| 218 |
+
cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER = 113, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
|
| 219 |
+
} cudaEglColorFormat;
|
| 220 |
+
|
| 221 |
+
/**
|
| 222 |
+
* CUDA EGL Plane Descriptor - structure defining each plane of a CUDA EGLFrame
|
| 223 |
+
*/
|
| 224 |
+
typedef struct cudaEglPlaneDesc_st {
|
| 225 |
+
unsigned int width; /**< Width of plane */
|
| 226 |
+
unsigned int height; /**< Height of plane */
|
| 227 |
+
unsigned int depth; /**< Depth of plane */
|
| 228 |
+
unsigned int pitch; /**< Pitch of plane */
|
| 229 |
+
unsigned int numChannels; /**< Number of channels for the plane */
|
| 230 |
+
struct cudaChannelFormatDesc channelDesc; /**< Channel Format Descriptor */
|
| 231 |
+
unsigned int reserved[4]; /**< Reserved for future use */
|
| 232 |
+
} cudaEglPlaneDesc;
|
| 233 |
+
|
| 234 |
+
/**
|
| 235 |
+
* CUDA EGLFrame Descriptor - structure defining one frame of EGL.
|
| 236 |
+
*
|
| 237 |
+
* Each frame may contain one or more planes depending on whether the surface is Multiplanar or not.
|
| 238 |
+
* Each plane of EGLFrame is represented by ::cudaEglPlaneDesc which is defined as:
|
| 239 |
+
* \code
|
| 240 |
+
* typedef struct cudaEglPlaneDesc_st {
|
| 241 |
+
* unsigned int width;
|
| 242 |
+
* unsigned int height;
|
| 243 |
+
* unsigned int depth;
|
| 244 |
+
* unsigned int pitch;
|
| 245 |
+
* unsigned int numChannels;
|
| 246 |
+
* struct cudaChannelFormatDesc channelDesc;
|
| 247 |
+
* unsigned int reserved[4];
|
| 248 |
+
* } cudaEglPlaneDesc;
|
| 249 |
+
* \endcode
|
| 250 |
+
|
| 251 |
+
*/
|
| 252 |
+
typedef struct cudaEglFrame_st {
|
| 253 |
+
union {
|
| 254 |
+
cudaArray_t pArray[CUDA_EGL_MAX_PLANES]; /**< Array of CUDA arrays corresponding to each plane*/
|
| 255 |
+
struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES]; /**< Array of Pointers corresponding to each plane*/
|
| 256 |
+
} frame;
|
| 257 |
+
cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES]; /**< CUDA EGL Plane Descriptor ::cudaEglPlaneDesc*/
|
| 258 |
+
unsigned int planeCount; /**< Number of planes */
|
| 259 |
+
cudaEglFrameType frameType; /**< Array or Pitch */
|
| 260 |
+
cudaEglColorFormat eglColorFormat; /**< CUDA EGL Color Format*/
|
| 261 |
+
} cudaEglFrame;
|
| 262 |
+
|
| 263 |
+
/**
|
| 264 |
+
* CUDA EGLSream Connection
|
| 265 |
+
*/
|
| 266 |
+
typedef struct CUeglStreamConnection_st *cudaEglStreamConnection;
|
| 267 |
+
|
| 268 |
+
/** @} */ /* END CUDART_TYPES */
|
| 269 |
+
|
| 270 |
+
/**
|
| 271 |
+
* \addtogroup CUDART_EGL EGL Interoperability
|
| 272 |
+
* This section describes the EGL interoperability functions of the CUDA
|
| 273 |
+
* runtime application programming interface.
|
| 274 |
+
*
|
| 275 |
+
* @{
|
| 276 |
+
*/
|
| 277 |
+
|
| 278 |
+
/**
|
| 279 |
+
* \brief Registers an EGL image
|
| 280 |
+
*
|
| 281 |
+
* Registers the EGLImageKHR specified by \p image for access by
|
| 282 |
+
* CUDA. A handle to the registered object is returned as \p pCudaResource.
|
| 283 |
+
* Additional Mapping/Unmapping is not required for the registered resource and
|
| 284 |
+
* ::cudaGraphicsResourceGetMappedEglFrame can be directly called on the \p pCudaResource.
|
| 285 |
+
*
|
| 286 |
+
* The application will be responsible for synchronizing access to shared objects.
|
| 287 |
+
* The application must ensure that any pending operation which access the objects have completed
|
| 288 |
+
* before passing control to CUDA. This may be accomplished by issuing and waiting for
|
| 289 |
+
* glFinish command on all GLcontexts (for OpenGL and likewise for other APIs).
|
| 290 |
+
* The application will be also responsible for ensuring that any pending operation on the
|
| 291 |
+
* registered CUDA resource has completed prior to executing subsequent commands in other APIs
|
| 292 |
+
* accesing the same memory objects.
|
| 293 |
+
* This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably).
|
| 294 |
+
*
|
| 295 |
+
* The surface's intended usage is specified using \p flags, as follows:
|
| 296 |
+
*
|
| 297 |
+
* - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
|
| 298 |
+
* resource will be used. It is therefore assumed that this resource will be
|
| 299 |
+
* read from and written to by CUDA. This is the default value.
|
| 300 |
+
* - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
|
| 301 |
+
* will not write to this resource.
|
| 302 |
+
* - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
|
| 303 |
+
* CUDA will not read from this resource and will write over the
|
| 304 |
+
* entire contents of the resource, so none of the data previously
|
| 305 |
+
* stored in the resource will be preserved.
|
| 306 |
+
*
|
| 307 |
+
* The EGLImageKHR is an object which can be used to create EGLImage target resource. It is defined as a void pointer.
|
| 308 |
+
* typedef void* EGLImageKHR
|
| 309 |
+
*
|
| 310 |
+
* \param pCudaResource - Pointer to the returned object handle
|
| 311 |
+
* \param image - An EGLImageKHR image which can be used to create target resource.
|
| 312 |
+
* \param flags - Map flags
|
| 313 |
+
*
|
| 314 |
+
* \return
|
| 315 |
+
* ::cudaSuccess,
|
| 316 |
+
* ::cudaErrorInvalidResourceHandle,
|
| 317 |
+
* ::cudaErrorInvalidValue,
|
| 318 |
+
* ::cudaErrorUnknown
|
| 319 |
+
*
|
| 320 |
+
* \sa
|
| 321 |
+
* ::cudaGraphicsUnregisterResource,
|
| 322 |
+
* ::cudaGraphicsResourceGetMappedEglFrame,
|
| 323 |
+
* ::cuGraphicsEGLRegisterImage
|
| 324 |
+
*/
|
| 325 |
+
extern __host__ cudaError_t CUDARTAPI cudaGraphicsEGLRegisterImage(struct cudaGraphicsResource **pCudaResource, EGLImageKHR image, unsigned int flags);
|
| 326 |
+
|
| 327 |
+
/**
|
| 328 |
+
* \brief Connect CUDA to EGLStream as a consumer.
|
| 329 |
+
*
|
| 330 |
+
* Connect CUDA as a consumer to EGLStreamKHR specified by \p eglStream.
|
| 331 |
+
*
|
| 332 |
+
* The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
|
| 333 |
+
* API to another.
|
| 334 |
+
*
|
| 335 |
+
* \param conn - Pointer to the returned connection handle
|
| 336 |
+
* \param eglStream - EGLStreamKHR handle
|
| 337 |
+
*
|
| 338 |
+
* \return
|
| 339 |
+
* ::cudaSuccess,
|
| 340 |
+
* ::cudaErrorInvalidValue,
|
| 341 |
+
* ::cudaErrorUnknown
|
| 342 |
+
*
|
| 343 |
+
* \sa
|
| 344 |
+
* ::cudaEGLStreamConsumerDisconnect,
|
| 345 |
+
* ::cudaEGLStreamConsumerAcquireFrame,
|
| 346 |
+
* ::cudaEGLStreamConsumerReleaseFrame,
|
| 347 |
+
* ::cuEGLStreamConsumerConnect
|
| 348 |
+
*/
|
| 349 |
+
extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnect(cudaEglStreamConnection *conn, EGLStreamKHR eglStream);
|
| 350 |
+
|
| 351 |
+
/**
|
| 352 |
+
* \brief Connect CUDA to EGLStream as a consumer with given flags.
|
| 353 |
+
*
|
| 354 |
+
* Connect CUDA as a consumer to EGLStreamKHR specified by \p stream with specified \p flags defined by
|
| 355 |
+
* ::cudaEglResourceLocationFlags.
|
| 356 |
+
*
|
| 357 |
+
* The flags specify whether the consumer wants to access frames from system memory or video memory.
|
| 358 |
+
* Default is ::cudaEglResourceLocationVidmem.
|
| 359 |
+
*
|
| 360 |
+
* \param conn - Pointer to the returned connection handle
|
| 361 |
+
* \param eglStream - EGLStreamKHR handle
|
| 362 |
+
* \param flags - Flags denote intended location - system or video.
|
| 363 |
+
*
|
| 364 |
+
* \return
|
| 365 |
+
* ::cudaSuccess,
|
| 366 |
+
* ::cudaErrorInvalidValue,
|
| 367 |
+
* ::cudaErrorUnknown
|
| 368 |
+
*
|
| 369 |
+
* \sa
|
| 370 |
+
* ::cudaEGLStreamConsumerDisconnect,
|
| 371 |
+
* ::cudaEGLStreamConsumerAcquireFrame,
|
| 372 |
+
* ::cudaEGLStreamConsumerReleaseFrame,
|
| 373 |
+
* ::cuEGLStreamConsumerConnectWithFlags
|
| 374 |
+
*/
|
| 375 |
+
extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnectWithFlags(cudaEglStreamConnection *conn, EGLStreamKHR eglStream, unsigned int flags);
|
| 376 |
+
|
| 377 |
+
/**
|
| 378 |
+
* \brief Disconnect CUDA as a consumer to EGLStream .
|
| 379 |
+
*
|
| 380 |
+
* Disconnect CUDA as a consumer to EGLStreamKHR.
|
| 381 |
+
*
|
| 382 |
+
* \param conn - Conection to disconnect.
|
| 383 |
+
*
|
| 384 |
+
* \return
|
| 385 |
+
* ::cudaSuccess,
|
| 386 |
+
* ::cudaErrorInvalidValue,
|
| 387 |
+
* ::cudaErrorUnknown
|
| 388 |
+
*
|
| 389 |
+
* \sa
|
| 390 |
+
* ::cudaEGLStreamConsumerConnect,
|
| 391 |
+
* ::cudaEGLStreamConsumerAcquireFrame,
|
| 392 |
+
* ::cudaEGLStreamConsumerReleaseFrame,
|
| 393 |
+
* ::cuEGLStreamConsumerDisconnect
|
| 394 |
+
*/
|
| 395 |
+
extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerDisconnect(cudaEglStreamConnection *conn);
|
| 396 |
+
|
| 397 |
+
/**
|
| 398 |
+
* \brief Acquire an image frame from the EGLStream with CUDA as a consumer.
|
| 399 |
+
*
|
| 400 |
+
* Acquire an image frame from EGLStreamKHR.
|
| 401 |
+
* ::cudaGraphicsResourceGetMappedEglFrame can be called on \p pCudaResource to get
|
| 402 |
+
* ::cudaEglFrame.
|
| 403 |
+
*
|
| 404 |
+
* \param conn - Connection on which to acquire
|
| 405 |
+
* \param pCudaResource - CUDA resource on which the EGLStream frame will be mapped for use.
|
| 406 |
+
* \param pStream - CUDA stream for synchronization and any data migrations
|
| 407 |
+
* implied by ::cudaEglResourceLocationFlags.
|
| 408 |
+
* \param timeout - Desired timeout in usec.
|
| 409 |
+
*
|
| 410 |
+
* \return
|
| 411 |
+
* ::cudaSuccess,
|
| 412 |
+
* ::cudaErrorInvalidValue,
|
| 413 |
+
* ::cudaErrorUnknown,
|
| 414 |
+
* ::cudaErrorLaunchTimeout
|
| 415 |
+
*
|
| 416 |
+
* \sa
|
| 417 |
+
* ::cudaEGLStreamConsumerConnect,
|
| 418 |
+
* ::cudaEGLStreamConsumerDisconnect,
|
| 419 |
+
* ::cudaEGLStreamConsumerReleaseFrame,
|
| 420 |
+
* ::cuEGLStreamConsumerAcquireFrame
|
| 421 |
+
*/
|
| 422 |
+
|
| 423 |
+
extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerAcquireFrame(cudaEglStreamConnection *conn,
|
| 424 |
+
cudaGraphicsResource_t *pCudaResource, cudaStream_t *pStream, unsigned int timeout);
|
| 425 |
+
/**
|
| 426 |
+
* \brief Releases the last frame acquired from the EGLStream.
|
| 427 |
+
*
|
| 428 |
+
* Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR.
|
| 429 |
+
*
|
| 430 |
+
* \param conn - Connection on which to release
|
| 431 |
+
* \param pCudaResource - CUDA resource whose corresponding frame is to be released
|
| 432 |
+
* \param pStream - CUDA stream on which release will be done.
|
| 433 |
+
*
|
| 434 |
+
* \return
|
| 435 |
+
* ::cudaSuccess,
|
| 436 |
+
* ::cudaErrorInvalidValue,
|
| 437 |
+
* ::cudaErrorUnknown
|
| 438 |
+
*
|
| 439 |
+
* \sa
|
| 440 |
+
* ::cudaEGLStreamConsumerConnect,
|
| 441 |
+
* ::cudaEGLStreamConsumerDisconnect,
|
| 442 |
+
* ::cudaEGLStreamConsumerAcquireFrame,
|
| 443 |
+
* ::cuEGLStreamConsumerReleaseFrame
|
| 444 |
+
*/
|
| 445 |
+
extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerReleaseFrame(cudaEglStreamConnection *conn,
|
| 446 |
+
cudaGraphicsResource_t pCudaResource, cudaStream_t *pStream);
|
| 447 |
+
|
| 448 |
+
/**
|
| 449 |
+
* \brief Connect CUDA to EGLStream as a producer.
|
| 450 |
+
*
|
| 451 |
+
* Connect CUDA as a producer to EGLStreamKHR specified by \p stream.
|
| 452 |
+
*
|
| 453 |
+
* The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
|
| 454 |
+
* API to another.
|
| 455 |
+
*
|
| 456 |
+
* \param conn - Pointer to the returned connection handle
|
| 457 |
+
* \param eglStream - EGLStreamKHR handle
|
| 458 |
+
* \param width - width of the image to be submitted to the stream
|
| 459 |
+
* \param height - height of the image to be submitted to the stream
|
| 460 |
+
*
|
| 461 |
+
* \return
|
| 462 |
+
* ::cudaSuccess,
|
| 463 |
+
* ::cudaErrorInvalidValue,
|
| 464 |
+
* ::cudaErrorUnknown
|
| 465 |
+
*
|
| 466 |
+
* \sa
|
| 467 |
+
* ::cudaEGLStreamProducerDisconnect,
|
| 468 |
+
* ::cudaEGLStreamProducerPresentFrame,
|
| 469 |
+
* ::cudaEGLStreamProducerReturnFrame,
|
| 470 |
+
* ::cuEGLStreamProducerConnect
|
| 471 |
+
*/
|
| 472 |
+
extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerConnect(cudaEglStreamConnection *conn,
|
| 473 |
+
EGLStreamKHR eglStream, EGLint width, EGLint height);
|
| 474 |
+
|
| 475 |
+
/**
|
| 476 |
+
* \brief Disconnect CUDA as a producer to EGLStream .
|
| 477 |
+
*
|
| 478 |
+
* Disconnect CUDA as a producer to EGLStreamKHR.
|
| 479 |
+
*
|
| 480 |
+
* \param conn - Conection to disconnect.
|
| 481 |
+
*
|
| 482 |
+
* \return
|
| 483 |
+
* ::cudaSuccess,
|
| 484 |
+
* ::cudaErrorInvalidValue,
|
| 485 |
+
* ::cudaErrorUnknown
|
| 486 |
+
*
|
| 487 |
+
* \sa
|
| 488 |
+
* ::cudaEGLStreamProducerConnect,
|
| 489 |
+
* ::cudaEGLStreamProducerPresentFrame,
|
| 490 |
+
* ::cudaEGLStreamProducerReturnFrame,
|
| 491 |
+
* ::cuEGLStreamProducerDisconnect
|
| 492 |
+
*/
|
| 493 |
+
extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerDisconnect(cudaEglStreamConnection *conn);
|
| 494 |
+
|
| 495 |
+
/**
|
| 496 |
+
* \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer.
|
| 497 |
+
*
|
| 498 |
+
* The ::cudaEglFrame is defined as:
|
| 499 |
+
* \code
|
| 500 |
+
* typedef struct cudaEglFrame_st {
|
| 501 |
+
* union {
|
| 502 |
+
* cudaArray_t pArray[CUDA_EGL_MAX_PLANES];
|
| 503 |
+
* struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES];
|
| 504 |
+
* } frame;
|
| 505 |
+
* cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES];
|
| 506 |
+
* unsigned int planeCount;
|
| 507 |
+
* cudaEglFrameType frameType;
|
| 508 |
+
* cudaEglColorFormat eglColorFormat;
|
| 509 |
+
* } cudaEglFrame;
|
| 510 |
+
* \endcode
|
| 511 |
+
*
|
| 512 |
+
* For ::cudaEglFrame of type ::cudaEglFrameTypePitch, the application may present sub-region of a memory
|
| 513 |
+
* allocation. In that case, ::cudaPitchedPtr::ptr will specify the start address of the sub-region in
|
| 514 |
+
* the allocation and ::cudaEglPlaneDesc will specify the dimensions of the sub-region.
|
| 515 |
+
*
|
| 516 |
+
* \param conn - Connection on which to present the CUDA array
|
| 517 |
+
* \param eglframe - CUDA Eglstream Proucer Frame handle to be sent to the consumer over EglStream.
|
| 518 |
+
* \param pStream - CUDA stream on which to present the frame.
|
| 519 |
+
*
|
| 520 |
+
* \return
|
| 521 |
+
* ::cudaSuccess,
|
| 522 |
+
* ::cudaErrorInvalidValue,
|
| 523 |
+
* ::cudaErrorUnknown
|
| 524 |
+
*
|
| 525 |
+
* \sa
|
| 526 |
+
* ::cudaEGLStreamProducerConnect,
|
| 527 |
+
* ::cudaEGLStreamProducerDisconnect,
|
| 528 |
+
* ::cudaEGLStreamProducerReturnFrame,
|
| 529 |
+
* ::cuEGLStreamProducerPresentFrame
|
| 530 |
+
*/
|
| 531 |
+
extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerPresentFrame(cudaEglStreamConnection *conn,
|
| 532 |
+
cudaEglFrame eglframe, cudaStream_t *pStream);
|
| 533 |
+
|
| 534 |
+
/**
|
| 535 |
+
* \brief Return the CUDA eglFrame to the EGLStream last released by the consumer.
|
| 536 |
+
*
|
| 537 |
+
* This API can potentially return cudaErrorLaunchTimeout if the consumer has not
|
| 538 |
+
* returned a frame to EGL stream. If timeout is returned the application can retry.
|
| 539 |
+
*
|
| 540 |
+
* \param conn - Connection on which to present the CUDA array
|
| 541 |
+
* \param eglframe - CUDA Eglstream Proucer Frame handle returned from the consumer over EglStream.
|
| 542 |
+
* \param pStream - CUDA stream on which to return the frame.
|
| 543 |
+
*
|
| 544 |
+
* \return
|
| 545 |
+
* ::cudaSuccess,
|
| 546 |
+
* ::cudaErrorLaunchTimeout,
|
| 547 |
+
* ::cudaErrorInvalidValue,
|
| 548 |
+
* ::cudaErrorUnknown
|
| 549 |
+
*
|
| 550 |
+
* \sa
|
| 551 |
+
* ::cudaEGLStreamProducerConnect,
|
| 552 |
+
* ::cudaEGLStreamProducerDisconnect,
|
| 553 |
+
* ::cudaEGLStreamProducerPresentFrame,
|
| 554 |
+
* ::cuEGLStreamProducerReturnFrame
|
| 555 |
+
*/
|
| 556 |
+
extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerReturnFrame(cudaEglStreamConnection *conn,
|
| 557 |
+
cudaEglFrame *eglframe, cudaStream_t *pStream);
|
| 558 |
+
|
| 559 |
+
/**
|
| 560 |
+
* \brief Get an eglFrame through which to access a registered EGL graphics resource.
|
| 561 |
+
*
|
| 562 |
+
* Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource
|
| 563 |
+
* \p resource may be accessed.
|
| 564 |
+
* This API can only be called for EGL graphics resources.
|
| 565 |
+
*
|
| 566 |
+
* The ::cudaEglFrame is defined as
|
| 567 |
+
* \code
|
| 568 |
+
* typedef struct cudaEglFrame_st {
|
| 569 |
+
* union {
|
| 570 |
+
* cudaArray_t pArray[CUDA_EGL_MAX_PLANES];
|
| 571 |
+
* struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES];
|
| 572 |
+
* } frame;
|
| 573 |
+
* cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES];
|
| 574 |
+
* unsigned int planeCount;
|
| 575 |
+
* cudaEglFrameType frameType;
|
| 576 |
+
* cudaEglColorFormat eglColorFormat;
|
| 577 |
+
* } cudaEglFrame;
|
| 578 |
+
* \endcode
|
| 579 |
+
*
|
| 580 |
+
*
|
| 581 |
+
* \param eglFrame - Returned eglFrame.
|
| 582 |
+
* \param resource - Registered resource to access.
|
| 583 |
+
* \param index - Index for cubemap surfaces.
|
| 584 |
+
* \param mipLevel - Mipmap level for the subresource to access.
|
| 585 |
+
*
|
| 586 |
+
* \return
|
| 587 |
+
* ::cudaSuccess,
|
| 588 |
+
* ::cudaErrorInvalidValue,
|
| 589 |
+
* ::cudaErrorUnknown
|
| 590 |
+
*
|
| 591 |
+
* \note Note that in case of multiplanar \p *eglFrame, pitch of only first plane (unsigned int cudaEglPlaneDesc::pitch) is to be considered by the application.
|
| 592 |
+
*
|
| 593 |
+
* \sa
|
| 594 |
+
* ::cudaGraphicsSubResourceGetMappedArray,
|
| 595 |
+
* ::cudaGraphicsResourceGetMappedPointer,
|
| 596 |
+
* ::cuGraphicsResourceGetMappedEglFrame
|
| 597 |
+
*/
|
| 598 |
+
extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceGetMappedEglFrame(cudaEglFrame* eglFrame,
|
| 599 |
+
cudaGraphicsResource_t resource, unsigned int index, unsigned int mipLevel);
|
| 600 |
+
|
| 601 |
+
/**
|
| 602 |
+
* \brief Creates an event from EGLSync object
|
| 603 |
+
*
|
| 604 |
+
* Creates an event *phEvent from an EGLSyncKHR eglSync with the flages specified
|
| 605 |
+
* via \p flags. Valid flags include:
|
| 606 |
+
* - ::cudaEventDefault: Default event creation flag.
|
| 607 |
+
* - ::cudaEventBlockingSync: Specifies that the created event should use blocking
|
| 608 |
+
* synchronization. A CPU thread that uses ::cudaEventSynchronize() to wait on
|
| 609 |
+
* an event created with this flag will block until the event has actually
|
| 610 |
+
* been completed.
|
| 611 |
+
*
|
| 612 |
+
* ::cudaEventRecord and TimingData are not supported for events created from EGLSync.
|
| 613 |
+
*
|
| 614 |
+
* The EGLSyncKHR is an opaque handle to an EGL sync object.
|
| 615 |
+
* typedef void* EGLSyncKHR
|
| 616 |
+
*
|
| 617 |
+
* \param phEvent - Returns newly created event
|
| 618 |
+
* \param eglSync - Opaque handle to EGLSync object
|
| 619 |
+
* \param flags - Event creation flags
|
| 620 |
+
*
|
| 621 |
+
* \return
|
| 622 |
+
* ::cudaSuccess,
|
| 623 |
+
* ::cudaErrorInitializationError,
|
| 624 |
+
* ::cudaErrorInvalidValue,
|
| 625 |
+
* ::cudaErrorLaunchFailure,
|
| 626 |
+
* ::cudaErrorMemoryAllocation
|
| 627 |
+
*
|
| 628 |
+
* \sa
|
| 629 |
+
* ::cudaEventQuery,
|
| 630 |
+
* ::cudaEventSynchronize,
|
| 631 |
+
* ::cudaEventDestroy
|
| 632 |
+
*/
|
| 633 |
+
extern __host__ cudaError_t CUDARTAPI cudaEventCreateFromEGLSync(cudaEvent_t *phEvent, EGLSyncKHR eglSync, unsigned int flags);
|
| 634 |
+
|
| 635 |
+
/** @} */ /* END CUDART_EGL */
|
| 636 |
+
|
| 637 |
+
#if defined(__cplusplus)
|
| 638 |
+
}
|
| 639 |
+
#endif /* __cplusplus */
|
| 640 |
+
|
| 641 |
+
#endif /* __CUDA_EGL_INTEROP_H__ */
|
| 642 |
+
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp
ADDED
|
@@ -0,0 +1,1546 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2022 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_FP8_HPP__)
|
| 51 |
+
#define __CUDA_FP8_HPP__
|
| 52 |
+
|
| 53 |
+
#if !defined(__CUDA_FP8_H__)
|
| 54 |
+
#error "Do not include this file directly. Instead, include cuda_fp8.h."
|
| 55 |
+
#endif
|
| 56 |
+
|
| 57 |
+
/* C++ header for std::memcpy (used for type punning in host-side
|
| 58 |
+
* implementations). When compiling as a CUDA source file memcpy is provided
|
| 59 |
+
* implicitly. !defined(__CUDACC__) implies !defined(__CUDACC_RTC__).
|
| 60 |
+
*/
|
| 61 |
+
#if defined(__cplusplus) && !defined(__CUDACC__)
|
| 62 |
+
#include <cstring>
|
| 63 |
+
#elif !defined(__cplusplus) && !defined(__CUDACC__)
|
| 64 |
+
#include <string.h>
|
| 65 |
+
#endif /* defined(__cplusplus) && !defined(__CUDACC__) */
|
| 66 |
+
|
| 67 |
+
/* Set up structure-alignment attribute */
|
| 68 |
+
#if !(defined __CUDA_ALIGN__)
|
| 69 |
+
#if defined(__CUDACC__)
|
| 70 |
+
#define __CUDA_ALIGN__(align) __align__(align)
|
| 71 |
+
#else
|
| 72 |
+
/* Define alignment macro based on compiler type (cannot assume C11 "_Alignas"
|
| 73 |
+
* is available) */
|
| 74 |
+
#if __cplusplus >= 201103L
|
| 75 |
+
#define __CUDA_ALIGN__(n) \
|
| 76 |
+
alignas(n) /* C++11 kindly gives us a keyword for this */
|
| 77 |
+
#else /* !defined(__CPP_VERSION_AT_LEAST_11_FP8)*/
|
| 78 |
+
#if defined(__GNUC__)
|
| 79 |
+
#define __CUDA_ALIGN__(n) __attribute__((aligned(n)))
|
| 80 |
+
#elif defined(_MSC_VER)
|
| 81 |
+
#define __CUDA_ALIGN__(n) __declspec(align(n))
|
| 82 |
+
#else
|
| 83 |
+
#define __CUDA_ALIGN__(n)
|
| 84 |
+
#endif /* defined(__GNUC__) */
|
| 85 |
+
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
|
| 86 |
+
#endif /* defined(__CUDACC__) */
|
| 87 |
+
#endif /* !(defined __CUDA_ALIGN__) */
|
| 88 |
+
|
| 89 |
+
#if !(defined __CPP_VERSION_AT_LEAST_11_FP8)
|
| 90 |
+
/* need c++11 for explicit operators */
|
| 91 |
+
#define __CUDA_NO_FP8_CONVERSION_OPERATORS__
|
| 92 |
+
#endif
|
| 93 |
+
|
| 94 |
+
__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
|
| 95 |
+
__nv_cvt_double_to_fp8(const double x, const __nv_saturation_t saturate,
|
| 96 |
+
const __nv_fp8_interpretation_t fp8_interpretation) {
|
| 97 |
+
unsigned char res;
|
| 98 |
+
unsigned long long int xbits;
|
| 99 |
+
|
| 100 |
+
#if defined(__CUDACC__) || (!defined __cplusplus)
|
| 101 |
+
(void)memcpy(&xbits, &x, sizeof(x));
|
| 102 |
+
#else
|
| 103 |
+
(void)std::memcpy(&xbits, &x, sizeof(x));
|
| 104 |
+
#endif
|
| 105 |
+
unsigned char FP8_MAXNORM;
|
| 106 |
+
unsigned char FP8_MANTISSA_MASK;
|
| 107 |
+
unsigned short int FP8_EXP_BIAS;
|
| 108 |
+
unsigned long long int FP8_SIGNIFICAND_BITS;
|
| 109 |
+
const unsigned long long int DP_INF_BITS = 0x7FF0000000000000ULL;
|
| 110 |
+
unsigned long long int FP8_MINDENORM_O2;
|
| 111 |
+
unsigned long long int FP8_OVERFLOW_THRESHOLD;
|
| 112 |
+
unsigned long long int FP8_MINNORM;
|
| 113 |
+
|
| 114 |
+
if (fp8_interpretation == __NV_E4M3) {
|
| 115 |
+
FP8_EXP_BIAS = 7U;
|
| 116 |
+
FP8_SIGNIFICAND_BITS = 4ULL;
|
| 117 |
+
FP8_MANTISSA_MASK = 0x7U;
|
| 118 |
+
FP8_MINDENORM_O2 = 0x3F50000000000000ULL; // mindenorm/2 = 2^-10
|
| 119 |
+
FP8_OVERFLOW_THRESHOLD =
|
| 120 |
+
0x407D000000000000ULL; // maxnorm + 1/2ulp = 0x1.Cp+8 + 0x1p+4
|
| 121 |
+
FP8_MAXNORM = 0x7EU;
|
| 122 |
+
FP8_MINNORM = 0x3F90000000000000ULL; // minnorm = 2^-6
|
| 123 |
+
} else { //__NV_E5M2
|
| 124 |
+
FP8_EXP_BIAS = 15U;
|
| 125 |
+
FP8_SIGNIFICAND_BITS = 3ULL;
|
| 126 |
+
FP8_MANTISSA_MASK = 0x3U;
|
| 127 |
+
FP8_MINDENORM_O2 = 0x3EE0000000000000ULL; // mindenorm/2 = 2^-17
|
| 128 |
+
FP8_OVERFLOW_THRESHOLD =
|
| 129 |
+
0x40EE000000000000ULL -
|
| 130 |
+
1ULL; // maxnorm + 1/2ulp = 0x1.Ep+15, and -1 to have common code
|
| 131 |
+
FP8_MAXNORM = 0x7BU;
|
| 132 |
+
FP8_MINNORM = 0x3F10000000000000ULL; // minnorm = 2^-14
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
// 1/2 LSB of the target format, positioned in double precision mantissa
|
| 136 |
+
// helpful in midpoints detection during round-to-nearest-even step
|
| 137 |
+
const unsigned long long int FP8_DP_HALF_ULP =
|
| 138 |
+
(unsigned long long int)1ULL << (53ULL - FP8_SIGNIFICAND_BITS - 1ULL);
|
| 139 |
+
// prepare sign bit in target format
|
| 140 |
+
unsigned char sign = (unsigned char)((xbits >> 63ULL) << 7U);
|
| 141 |
+
// prepare exponent field in target format
|
| 142 |
+
unsigned char exp =
|
| 143 |
+
(unsigned char)((((unsigned short int)(xbits >> 52ULL)) & 0x7FFU) -
|
| 144 |
+
1023U + FP8_EXP_BIAS);
|
| 145 |
+
// round mantissa to target format width, rounding towards zero
|
| 146 |
+
unsigned char mantissa =
|
| 147 |
+
(unsigned char)(xbits >> (53ULL - FP8_SIGNIFICAND_BITS)) &
|
| 148 |
+
FP8_MANTISSA_MASK;
|
| 149 |
+
unsigned long long int absx = xbits & 0x7FFFFFFFFFFFFFFFULL;
|
| 150 |
+
|
| 151 |
+
if (absx <= FP8_MINDENORM_O2) {
|
| 152 |
+
// zero or underflow
|
| 153 |
+
res = 0U;
|
| 154 |
+
} else if (absx > DP_INF_BITS) {
|
| 155 |
+
// NaN
|
| 156 |
+
if (fp8_interpretation == __NV_E4M3) {
|
| 157 |
+
res = 0x7FU;
|
| 158 |
+
} else {
|
| 159 |
+
// NaN --> QNaN
|
| 160 |
+
res = 0x7EU | mantissa;
|
| 161 |
+
}
|
| 162 |
+
} else if (absx > FP8_OVERFLOW_THRESHOLD) {
|
| 163 |
+
if (saturate == __NV_SATFINITE) {
|
| 164 |
+
res = FP8_MAXNORM;
|
| 165 |
+
} else {
|
| 166 |
+
// __NV_NOSAT
|
| 167 |
+
if (fp8_interpretation == __NV_E4M3) {
|
| 168 |
+
// no Inf in E4M3
|
| 169 |
+
res = 0x7FU; // NaN
|
| 170 |
+
} else {
|
| 171 |
+
res = 0x7CU; // Inf in E5M2
|
| 172 |
+
}
|
| 173 |
+
}
|
| 174 |
+
} else if (absx >= FP8_MINNORM) {
|
| 175 |
+
res = (unsigned char)((exp << (FP8_SIGNIFICAND_BITS - 1U)) | mantissa);
|
| 176 |
+
// rounded-off bits
|
| 177 |
+
unsigned long long int round =
|
| 178 |
+
xbits & ((FP8_DP_HALF_ULP << 1ULL) - 1ULL);
|
| 179 |
+
// round-to-nearest-even adjustment
|
| 180 |
+
if ((round > FP8_DP_HALF_ULP) ||
|
| 181 |
+
((round == FP8_DP_HALF_ULP) && (mantissa & 1U))) {
|
| 182 |
+
res = (unsigned char)(res + 1U);
|
| 183 |
+
}
|
| 184 |
+
} else // Denormal range
|
| 185 |
+
{
|
| 186 |
+
unsigned char shift = (unsigned char)(1U - exp);
|
| 187 |
+
// add implicit leading bit
|
| 188 |
+
mantissa |= (unsigned char)(1U << (FP8_SIGNIFICAND_BITS - 1U));
|
| 189 |
+
// additional round-off due to denormalization
|
| 190 |
+
res = (unsigned char)(mantissa >> shift);
|
| 191 |
+
|
| 192 |
+
// rounded-off bits, including implicit leading bit
|
| 193 |
+
unsigned long long int round =
|
| 194 |
+
(xbits | ((unsigned long long int)1ULL << (53ULL - 1ULL))) &
|
| 195 |
+
((FP8_DP_HALF_ULP << (shift + 1ULL)) - 1ULL);
|
| 196 |
+
// round-to-nearest-even adjustment
|
| 197 |
+
if ((round > (FP8_DP_HALF_ULP << shift)) ||
|
| 198 |
+
((round == (FP8_DP_HALF_ULP << shift)) && (res & 1U))) {
|
| 199 |
+
res = (unsigned char)(res + 1U);
|
| 200 |
+
}
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
res |= sign;
|
| 204 |
+
|
| 205 |
+
return (__nv_fp8_storage_t)res;
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
|
| 209 |
+
__nv_cvt_double2_to_fp8x2(const double2 x, const __nv_saturation_t saturate,
|
| 210 |
+
const __nv_fp8_interpretation_t fp8_interpretation) {
|
| 211 |
+
__nv_fp8x2_storage_t storage = (__nv_fp8x2_storage_t)__nv_cvt_double_to_fp8(
|
| 212 |
+
x.y, saturate, fp8_interpretation);
|
| 213 |
+
storage = (__nv_fp8x2_storage_t)(storage << 8U);
|
| 214 |
+
storage = (__nv_fp8x2_storage_t)(storage |
|
| 215 |
+
__nv_cvt_double_to_fp8(
|
| 216 |
+
x.x, saturate, fp8_interpretation));
|
| 217 |
+
return storage;
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
|
| 221 |
+
__nv_cvt_float_to_fp8(const float x, const __nv_saturation_t saturate,
|
| 222 |
+
const __nv_fp8_interpretation_t fp8_interpretation) {
|
| 223 |
+
__nv_fp8_storage_t res = 0U;
|
| 224 |
+
#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
|
| 225 |
+
if (saturate == __NV_SATFINITE) {
|
| 226 |
+
__nv_fp8x2_storage_t storage;
|
| 227 |
+
if (fp8_interpretation == __NV_E5M2) {
|
| 228 |
+
asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n"
|
| 229 |
+
: "=h"(storage)
|
| 230 |
+
: "f"(x), "f"(0.0f));
|
| 231 |
+
} else {
|
| 232 |
+
asm("{cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;}\n"
|
| 233 |
+
: "=h"(storage)
|
| 234 |
+
: "f"(x), "f"(0.0f));
|
| 235 |
+
}
|
| 236 |
+
res = (__nv_fp8_storage_t)storage;
|
| 237 |
+
} else
|
| 238 |
+
#endif
|
| 239 |
+
{
|
| 240 |
+
unsigned int xbits;
|
| 241 |
+
#if defined(__CUDACC__) || (!defined __cplusplus)
|
| 242 |
+
(void)memcpy(&xbits, &x, sizeof(x));
|
| 243 |
+
#else
|
| 244 |
+
(void)std::memcpy(&xbits, &x, sizeof(x));
|
| 245 |
+
#endif
|
| 246 |
+
|
| 247 |
+
// isnan
|
| 248 |
+
if ((xbits & 0x7FFFFFFFU) > 0x7F800000U) {
|
| 249 |
+
// Canonical NaN
|
| 250 |
+
xbits = 0x7FFFFFFFU;
|
| 251 |
+
}
|
| 252 |
+
|
| 253 |
+
float fx;
|
| 254 |
+
#if defined(__CUDACC__) || (!defined __cplusplus)
|
| 255 |
+
(void)memcpy(&fx, &xbits, sizeof(xbits));
|
| 256 |
+
#else
|
| 257 |
+
(void)std::memcpy(&fx, &xbits, sizeof(xbits));
|
| 258 |
+
#endif
|
| 259 |
+
|
| 260 |
+
const double dx = (double)fx;
|
| 261 |
+
res = __nv_cvt_double_to_fp8(dx, saturate, fp8_interpretation);
|
| 262 |
+
}
|
| 263 |
+
return res;
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
|
| 267 |
+
__nv_cvt_float2_to_fp8x2(const float2 x, const __nv_saturation_t saturate,
|
| 268 |
+
const __nv_fp8_interpretation_t fp8_interpretation) {
|
| 269 |
+
__nv_fp8x2_storage_t storage;
|
| 270 |
+
#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
|
| 271 |
+
if (saturate == __NV_SATFINITE) {
|
| 272 |
+
if (fp8_interpretation == __NV_E5M2) {
|
| 273 |
+
asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n"
|
| 274 |
+
: "=h"(storage)
|
| 275 |
+
: "f"(x.x), "f"(x.y));
|
| 276 |
+
} else {
|
| 277 |
+
asm("{cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;}\n"
|
| 278 |
+
: "=h"(storage)
|
| 279 |
+
: "f"(x.x), "f"(x.y));
|
| 280 |
+
}
|
| 281 |
+
} else
|
| 282 |
+
#endif
|
| 283 |
+
{
|
| 284 |
+
storage = (__nv_fp8x2_storage_t)__nv_cvt_float_to_fp8(
|
| 285 |
+
x.y, saturate, fp8_interpretation);
|
| 286 |
+
storage = (__nv_fp8x2_storage_t)(storage << 8U);
|
| 287 |
+
storage = (__nv_fp8x2_storage_t)(storage | __nv_cvt_float_to_fp8(
|
| 288 |
+
x.x, saturate,
|
| 289 |
+
fp8_interpretation));
|
| 290 |
+
}
|
| 291 |
+
return storage;
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
__CUDA_HOSTDEVICE_FP8_DECL__ float
|
| 295 |
+
__internal_halfraw_to_float(const __half_raw x) {
|
| 296 |
+
float f;
|
| 297 |
+
#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
|
| 298 |
+
asm("{cvt.f32.f16 %0, %1;}\n" : "=f"(f) : "h"(x.x));
|
| 299 |
+
#else
|
| 300 |
+
const unsigned int ux = (unsigned int)x.x;
|
| 301 |
+
unsigned int sign = (ux >> 15U) & 1U;
|
| 302 |
+
unsigned int exponent = (ux >> 10U) & 0x1fU;
|
| 303 |
+
unsigned int mantissa = (ux & 0x3ffU) << 13U;
|
| 304 |
+
if (exponent == 0x1fU) { /* NaN or Inf */
|
| 305 |
+
/* discard sign of a NaN */
|
| 306 |
+
sign = ((mantissa != 0U) ? (sign >> 1U) : sign);
|
| 307 |
+
mantissa = ((mantissa != 0U) ? 0x7fffffU : 0U);
|
| 308 |
+
exponent = 0xffU;
|
| 309 |
+
} else if (exponent == 0U) { /* Denorm or Zero */
|
| 310 |
+
if (mantissa != 0U) {
|
| 311 |
+
unsigned int msb;
|
| 312 |
+
exponent = 0x71U;
|
| 313 |
+
do {
|
| 314 |
+
msb = (mantissa & 0x400000U);
|
| 315 |
+
mantissa <<= 1U; /* normalize */
|
| 316 |
+
--exponent;
|
| 317 |
+
} while (msb == 0U);
|
| 318 |
+
mantissa &= 0x7fffffU; /* 1.mantissa is implicit */
|
| 319 |
+
}
|
| 320 |
+
} else {
|
| 321 |
+
exponent += 0x70U;
|
| 322 |
+
}
|
| 323 |
+
const unsigned int u = ((sign << 31U) | (exponent << 23U) | mantissa);
|
| 324 |
+
#if defined(__CUDACC__) || (!defined __cplusplus)
|
| 325 |
+
(void)memcpy(&f, &u, sizeof(u));
|
| 326 |
+
#else
|
| 327 |
+
(void)std::memcpy(&f, &u, sizeof(u));
|
| 328 |
+
#endif
|
| 329 |
+
#endif /* (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) */
|
| 330 |
+
return f;
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
__CUDA_HOSTDEVICE_FP8_DECL__ float2
|
| 334 |
+
__internal_halfraw2_to_float2(const __half2_raw x) {
|
| 335 |
+
__half_raw raw;
|
| 336 |
+
float2 res;
|
| 337 |
+
raw.x = x.x;
|
| 338 |
+
res.x = __internal_halfraw_to_float(raw);
|
| 339 |
+
raw.x = x.y;
|
| 340 |
+
res.y = __internal_halfraw_to_float(raw);
|
| 341 |
+
return res;
|
| 342 |
+
}
|
| 343 |
+
|
| 344 |
+
__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t
|
| 345 |
+
__nv_cvt_halfraw_to_fp8(const __half_raw x, const __nv_saturation_t saturate,
|
| 346 |
+
const __nv_fp8_interpretation_t fp8_interpretation) {
|
| 347 |
+
__nv_fp8_storage_t res = 0U;
|
| 348 |
+
#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
|
| 349 |
+
if (saturate == __NV_SATFINITE) {
|
| 350 |
+
unsigned int half2_storage = (unsigned int)(x.x);
|
| 351 |
+
__nv_fp8x2_storage_t tmp;
|
| 352 |
+
if (fp8_interpretation == __NV_E5M2) {
|
| 353 |
+
asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n"
|
| 354 |
+
: "=h"(tmp)
|
| 355 |
+
: "r"(half2_storage));
|
| 356 |
+
} else {
|
| 357 |
+
asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n"
|
| 358 |
+
: "=h"(tmp)
|
| 359 |
+
: "r"(half2_storage));
|
| 360 |
+
}
|
| 361 |
+
res = (__nv_fp8_storage_t)tmp;
|
| 362 |
+
} else
|
| 363 |
+
#endif
|
| 364 |
+
{
|
| 365 |
+
float fx = __internal_halfraw_to_float(x);
|
| 366 |
+
res = __nv_cvt_float_to_fp8(fx, saturate, fp8_interpretation);
|
| 367 |
+
}
|
| 368 |
+
return res;
|
| 369 |
+
}
|
| 370 |
+
|
| 371 |
+
/* Convert a packed pair of raw fp16 values to packed fp8x2 storage.
 * SM 8.9+ SATFINITE uses one hardware cvt over both lanes; otherwise the
 * two lanes are converted independently and re-packed (low lane in the
 * low byte). */
__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t __nv_cvt_halfraw2_to_fp8x2(
    const __half2_raw x, const __nv_saturation_t saturate,
    const __nv_fp8_interpretation_t fp8_interpretation) {
    __nv_fp8x2_storage_t result;
#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
    if (saturate == __NV_SATFINITE) {
        unsigned int packed_in;
        (void)memcpy(&packed_in, &x, sizeof(x));

        if (fp8_interpretation == __NV_E5M2) {
            asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n"
                : "=h"(result)
                : "r"(packed_in));
        } else {
            asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n"
                : "=h"(result)
                : "r"(packed_in));
        }
    } else
#endif
    {
        __half_raw lane;
        lane.x = x.x;
        const __nv_fp8_storage_t lo =
            __nv_cvt_halfraw_to_fp8(lane, saturate, fp8_interpretation);
        lane.x = x.y;
        const __nv_fp8_storage_t hi =
            __nv_cvt_halfraw_to_fp8(lane, saturate, fp8_interpretation);
        result = (__nv_fp8x2_storage_t)(((unsigned int)hi << 8U) |
                                        (unsigned int)lo);
    }
    return result;
}
|
| 405 |
+
|
| 406 |
+
/* Widen a raw bfloat16 to float. Exact: bf16 is the upper 16 bits of an
 * IEEE binary32, so padding the low mantissa bits with zeros suffices. */
__CUDA_HOSTDEVICE_FP8_DECL__ float
__internal_bf16raw_to_float(const __nv_bfloat16_raw x) {
    const unsigned int bits = ((unsigned int)x.x) << 16U;
    float result;
    /* Type-pun via memcpy; std:: qualification needed for host C++. */
#if defined(__CUDACC__) || (!defined __cplusplus)
    (void)memcpy(&result, &bits, sizeof(bits));
#else
    (void)std::memcpy(&result, &bits, sizeof(bits));
#endif
    return result;
}
|
| 417 |
+
|
| 418 |
+
/* Narrow a float to raw bfloat16 with round-towards-zero semantics:
 * the low 16 mantissa bits are simply truncated away. */
__CUDA_HOSTDEVICE_FP8_DECL__ __nv_bfloat16_raw
__internal_float_to_bf16raw_rz(const float x) {
    unsigned int bits;
#if defined(__CUDACC__) || (!defined __cplusplus)
    (void)memcpy(&bits, &x, sizeof(x));
#else
    (void)std::memcpy(&bits, &x, sizeof(x));
#endif
    __nv_bfloat16_raw raw;
    raw.x = (unsigned short int)(bits >> 16U);
    return raw;
}
|
| 430 |
+
|
| 431 |
+
/* Convert a raw bfloat16 to fp8 storage. The widening to float is exact,
 * so all rounding/saturation is delegated to the float-to-fp8 path. */
__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t __nv_cvt_bfloat16raw_to_fp8(
    const __nv_bfloat16_raw x, const __nv_saturation_t saturate,
    const __nv_fp8_interpretation_t fp8_interpretation) {
    return __nv_cvt_float_to_fp8(__internal_bf16raw_to_float(x), saturate,
                                 fp8_interpretation);
}
|
| 439 |
+
|
| 440 |
+
/* Convert a packed pair of raw bfloat16 values to packed fp8x2 storage.
 * Lanes are converted independently; the low lane (x.x) ends up in the
 * low byte of the result. */
__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t
__nv_cvt_bfloat16raw2_to_fp8x2(
    const __nv_bfloat162_raw x, const __nv_saturation_t saturate,
    const __nv_fp8_interpretation_t fp8_interpretation) {
    __nv_bfloat16_raw lane;
    lane.x = x.x;
    const __nv_fp8_storage_t lo =
        __nv_cvt_bfloat16raw_to_fp8(lane, saturate, fp8_interpretation);
    lane.x = x.y;
    const __nv_fp8_storage_t hi =
        __nv_cvt_bfloat16raw_to_fp8(lane, saturate, fp8_interpretation);
    return (__nv_fp8x2_storage_t)(((unsigned int)hi << 8U) |
                                  (unsigned int)lo);
}
|
| 456 |
+
|
| 457 |
+
/* Forward declaration: defined below; the scalar conversion reuses it on
 * SM 8.9+ where the hardware cvt operates on packed pairs. */
__CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw
__nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x,
                           const __nv_fp8_interpretation_t fp8_interpretation);
/* Widen one fp8 value to a raw fp16.
 * E5M2 shares the fp16 exponent layout, so the software path is a plain
 * left shift plus NaN canonicalization; E4M3 needs exponent re-biasing
 * and explicit normalization of subnormal inputs. The conversion is
 * exact (every fp8 value is representable in fp16). */
__CUDA_HOSTDEVICE_FP8_DECL__ __half_raw
__nv_cvt_fp8_to_halfraw(const __nv_fp8_storage_t x,
                        const __nv_fp8_interpretation_t fp8_interpretation) {
    __half_raw res;
    res.x = 0U;
#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
    /* Hardware path: convert as a pair (x in the low byte), keep low lane. */
    res.x =
        __nv_cvt_fp8x2_to_halfraw2((__nv_fp8x2_storage_t)x, fp8_interpretation)
            .x;
#else
    /* Software path: place the 8 fp8 bits into the top byte of fp16. */
    unsigned short int ur = (unsigned short int)x;
    ur = (unsigned short int)(ur << 8U);

    if (fp8_interpretation == __NV_E5M2) {
        /* E5M2 and fp16 have identical sign/exponent fields; only NaN
         * payloads need fixing up. 0x7C00 is infinity after the shift. */
        if ((ur & 0x7FFFU) > 0x7C00U) {
            /* If NaN, return canonical NaN */
            ur = 0x7FFFU;
        }
    } else { // __NV_E4M3
        unsigned short int sign = ur & 0x8000U;
        /* Re-bias: fp8 e4m3 bias is 7, fp16 bias is 15; shifting the
         * 4-bit exponent into place and adding 0x2000 adjusts the bias. */
        unsigned short int exponent =
            (unsigned short int)(((ur & 0x7800U) >> 1U) + 0x2000U);
        unsigned short int mantissa = (ur & 0x0700U) >> 1U;
        unsigned char absx = 0x7FU & (unsigned char)x;

        if (absx == 0x7FU) // NaN
        {
            ur = 0x7FFFU; // fp16 canonical NaN, discard sign
        } else if (exponent == 0x2000U) {
            // zero or denormal
            if (mantissa != 0U) {
                // normalize: shift until the implicit bit appears, dropping
                // the fp16 exponent by one unit (0x0400) per shift
                mantissa = (unsigned short int)(mantissa << 1U);
                while ((mantissa & 0x0400U) == 0U) {
                    mantissa = (unsigned short int)(mantissa << 1U);
                    exponent = (unsigned short int)(exponent - 0x0400U);
                }
                // discard implicit leading bit
                mantissa &= 0x03FFU;
            } else { // Zero
                exponent = 0U;
            }

            ur = (sign | exponent) | mantissa;
        } else {
            ur = (sign | exponent) | mantissa;
        }
    }
    res.x = ur;
#endif
    return res;
}
|
| 512 |
+
|
| 513 |
+
/* Widen packed fp8x2 storage to a raw fp16 pair.
 * SM 8.9+ uses the native cvt instruction; otherwise each byte goes
 * through the scalar software conversion (low byte -> low lane). */
__CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw
__nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x,
                           const __nv_fp8_interpretation_t fp8_interpretation) {
    __half2_raw res;
#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890)
    unsigned int packed_out;
    if (fp8_interpretation == __NV_E5M2) {
        asm("{cvt.rn.f16x2.e5m2x2 %0, %1;}\n" : "=r"(packed_out) : "h"(x));
    } else {
        asm("{cvt.rn.f16x2.e4m3x2 %0, %1;}\n" : "=r"(packed_out) : "h"(x));
    }
    (void)memcpy(&res, &packed_out, sizeof(packed_out));
#else
    const __nv_fp8_storage_t lo_byte = (__nv_fp8_storage_t)x;
    const __nv_fp8_storage_t hi_byte = (__nv_fp8_storage_t)(x >> 8U);
    res.x = __nv_cvt_fp8_to_halfraw(lo_byte, fp8_interpretation).x;
    res.y = __nv_cvt_fp8_to_halfraw(hi_byte, fp8_interpretation).x;
#endif
    return res;
}
|
| 534 |
+
|
| 535 |
+
/* All other definitions in this file are only visible to C++ compilers */
|
| 536 |
+
#if defined(__cplusplus)
|
| 537 |
+
|
| 538 |
+
/**
|
| 539 |
+
* \defgroup CUDA_MATH_FP8_E5M2_STRUCT C++ struct for handling fp8 data type of e5m2 kind.
|
| 540 |
+
* \ingroup CUDA_MATH_INTRINSIC_FP8
|
| 541 |
+
*/
|
| 542 |
+
|
| 543 |
+
/**
|
| 544 |
+
* \ingroup CUDA_MATH_FP8_E5M2_STRUCT
|
| 545 |
+
* \brief __nv_fp8_e5m2 datatype
|
| 546 |
+
*
|
| 547 |
+
* \details This structure implements the datatype for handling
|
| 548 |
+
* \p fp8 floating-point numbers of \p e5m2 kind:
|
| 549 |
+
* with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
|
| 550 |
+
*
|
| 551 |
+
* The structure implements converting constructors and operators.
|
| 552 |
+
*/
|
| 553 |
+
struct __CUDA_ALIGN__(1) __nv_fp8_e5m2 {
  public:
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Storage variable contains the \p fp8 floating-point data.
     */
    __nv_fp8_storage_t __x;

    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Constructor by default.
     */
#if defined(__CPP_VERSION_AT_LEAST_11_FP8)
    __nv_fp8_e5m2() = default;
#else
    __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2() {}
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */

#if !defined(__CUDA_NO_FP8_CONVERSIONS__)

    /* Construct from wider FP types */
    /* Note we do avoid constructor init-list because of special host/device
     * compilation rules */

    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Constructor from \p __half data type, relies on \p __NV_SATFINITE
     * behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __half f) {
        __x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f),
                                      __NV_SATFINITE, __NV_E5M2);
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE
     * behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __nv_bfloat16 f) {
        __x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f),
                                          __NV_SATFINITE, __NV_E5M2);
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Constructor from \p float data type, relies on \p __NV_SATFINITE behavior
     * for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const float f) {
        __x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E5M2);
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Constructor from \p double data type, relies on \p __NV_SATFINITE
     * behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const double f) {
        __x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E5M2);
    }

    /* Converts from integral */
    /* Integral inputs are first widened to float, then narrowed to fp8. */

    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Constructor from \p unsigned \p short \p int data type, relies on \p
     * __NV_SATFINITE behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__
    __nv_fp8_e5m2(const unsigned short int val) {
        __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Constructor from \p unsigned \p int data type, relies on \p
     * __NV_SATFINITE behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const unsigned int val) {
        __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Constructor from \p unsigned \p long \p long \p int data type, relies on
     * \p __NV_SATFINITE behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__
    __nv_fp8_e5m2(const unsigned long long int val) {
        __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
    }

    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Constructor from \p short \p int data type.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const short int val) {
        __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Constructor from \p int data type, relies on \p __NV_SATFINITE behavior
     * for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const int val) {
        __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Constructor from \p long \p long \p int data type, relies on \p
     * __NV_SATFINITE behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const long long int val) {
        __x = static_cast<__nv_fp8_e5m2>(static_cast<float>(val)).__x;
    }

#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
    /* Widening FP converts */
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p __half data type.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const {
        return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E5M2));
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p float data type.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator float() const {
        return __internal_halfraw_to_float(
            __nv_cvt_fp8_to_halfraw(__x, __NV_E5M2));
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p __nv_bfloat16 data type.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const {
        // widen to float, then truncate (round-towards-zero) to bf16
        return static_cast<__nv_bfloat16>(
            __internal_float_to_bf16raw_rz(float(*this)));
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p double data type.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator double() const {
        return static_cast<double>(float(*this));
    }

    /* Convert to integral */

    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p unsigned \p char data type.
     * Clamps negative and too large inputs to the output range.
     * \p NaN inputs convert to \p zero.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const {
        unsigned char i;
        const float f = float(*this);
        const unsigned char max_val = 0xFFU;
        const unsigned char min_val = 0U;
        const unsigned char bits = (*this).__x;
        // saturation fixup: 0x7C is E5M2 infinity; strictly greater means NaN
        if ((bits & 0x7FU) > 0x7CU) {
            // NaN
            i = 0;
        } else if (f > static_cast<float>(max_val)) {
            // saturate maximum
            i = max_val;
        } else if (f < static_cast<float>(min_val)) {
            // saturate minimum
            i = min_val;
        } else {
            // normal value
            i = static_cast<unsigned char>(f);
        }
        return i;
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p unsigned \p short \p int data type.
     * Clamps negative and too large inputs to the output range.
     * \p NaN inputs convert to \p zero.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const {
        return __half2ushort_rz(__half(*this));
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p unsigned \p int data type.
     * Clamps negative and too large inputs to the output range.
     * \p NaN inputs convert to \p zero.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const {
        return __half2uint_rz(__half(*this));
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p unsigned \p long \p long \p int data type.
     * Clamps negative and too large inputs to the output range.
     * \p NaN inputs convert to \p 0x8000000000000000ULL.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const {
        return __half2ull_rz(__half(*this));
    }

    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p signed \p char data type.
     * Clamps too large inputs to the output range.
     * \p NaN inputs convert to \p zero.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const {
        signed char i;
        const float f = float(*this);
        const signed char max_val = (signed char)0x7FU;
        const signed char min_val = (signed char)0x80U;
        const unsigned char bits = (*this).__x;
        // saturation fixup: 0x7C is E5M2 infinity; strictly greater means NaN
        if ((bits & 0x7FU) > 0x7CU) {
            // NaN
            i = 0;
        } else if (f > static_cast<float>(max_val)) {
            // saturate maximum
            i = max_val;
        } else if (f < static_cast<float>(min_val)) {
            // saturate minimum
            i = min_val;
        } else {
            // normal value
            i = static_cast<signed char>(f);
        }
        return i;
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p short \p int data type.
     * Clamps too large inputs to the output range.
     * \p NaN inputs convert to \p zero.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const {
        return __half2short_rz(__half(*this));
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p int data type.
     * Clamps too large inputs to the output range.
     * \p NaN inputs convert to \p zero.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator int() const {
        return __half2int_rz(__half(*this));
    }
    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p long \p long \p int data type.
     * Clamps too large inputs to the output range.
     * \p NaN inputs convert to \p 0x8000000000000000LL.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const {
        return __half2ll_rz(__half(*this));
    }

    /**
     * \ingroup CUDA_MATH_FP8_E5M2_STRUCT
     * Conversion operator to \p bool data type.
     * +0 and -0 inputs convert to \p false.
     * Non-zero inputs convert to \p true.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const {
        // mask off the sign bit so -0 also tests as zero
        return (__x & 0x7FU) != 0U;
    }
#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
};
|
| 824 |
+
|
| 825 |
+
/**
|
| 826 |
+
* \defgroup CUDA_MATH_FP8X2_E5M2_STRUCT C++ struct for handling vector type of two fp8 values of e5m2 kind.
|
| 827 |
+
* \ingroup CUDA_MATH_INTRINSIC_FP8
|
| 828 |
+
*/
|
| 829 |
+
|
| 830 |
+
/**
|
| 831 |
+
* \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
|
| 832 |
+
* \brief __nv_fp8x2_e5m2 datatype
|
| 833 |
+
*
|
| 834 |
+
* \details This structure implements the datatype for handling two
|
| 835 |
+
* \p fp8 floating-point numbers of \p e5m2 kind each:
|
| 836 |
+
* with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
|
| 837 |
+
*
|
| 838 |
+
* The structure implements converting constructors and operators.
|
| 839 |
+
*/
|
| 840 |
+
struct __CUDA_ALIGN__(2) __nv_fp8x2_e5m2 {
  public:
    /**
     * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
     * Storage variable contains the vector of two \p fp8 floating-point data
     * values.
     */
    __nv_fp8x2_storage_t __x;

    /**
     * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
     * Constructor by default.
     */
#if defined(__CPP_VERSION_AT_LEAST_11_FP8)
    __nv_fp8x2_e5m2() = default;
#else
    __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2() {}
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */

#if !defined(__CUDA_NO_FP8_CONVERSIONS__)

    /* Construct from wider types */

    /**
     * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
     * Constructor from \p __half2 data type, relies on \p __NV_SATFINITE
     * behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __half2 f) {
        __x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f),
                                         __NV_SATFINITE, __NV_E5M2);
    }
    /**
     * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
     * Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE
     * behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __nv_bfloat162 f) {
        __x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f),
                                             __NV_SATFINITE, __NV_E5M2);
    }
    /**
     * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
     * Constructor from \p float2 data type, relies on \p __NV_SATFINITE
     * behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const float2 f) {
        __x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2);
    }
    /**
     * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
     * Constructor from \p double2 data type, relies on \p __NV_SATFINITE
     * behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const double2 f) {
        __x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2);
    }

#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
    /* Widening converts */
    /**
     * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
     * Conversion operator to \p __half2 data type.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const {
        return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2));
    }
    /**
     * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT
     * Conversion operator to \p float2 data type.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const {
        // exact: every e5m2 value is representable in fp16 and float
        return __internal_halfraw2_to_float2(
            __nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2));
    }
#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
};
|
| 918 |
+
|
| 919 |
+
/* Pack two 16-bit values into one 32-bit word; src_lo occupies bits 15:0
 * and src_hi bits 31:16. Device code uses mov.b32 register pairing. */
__CUDA_HOSTDEVICE_FP8_DECL__ unsigned int
__internal_pack_u16x2_to_u32(const unsigned short int src_lo,
                             const unsigned short int src_hi) {
    unsigned int packed;
#if (defined __CUDACC__) && (defined __CUDA_ARCH__)
    asm("{ mov.b32 %0, {%1,%2};}\n" : "=r"(packed) : "h"(src_lo), "h"(src_hi));
#else
    packed = static_cast<unsigned int>(src_lo) |
             (static_cast<unsigned int>(src_hi) << 16U);
#endif
    return packed;
}
|
| 931 |
+
|
| 932 |
+
/**
|
| 933 |
+
* \defgroup CUDA_MATH_FP8X4_E5M2_STRUCT C++ struct for handling vector type of four fp8 values of e5m2 kind.
|
| 934 |
+
* \ingroup CUDA_MATH_INTRINSIC_FP8
|
| 935 |
+
*/
|
| 936 |
+
|
| 937 |
+
/**
|
| 938 |
+
* \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
|
| 939 |
+
* \brief __nv_fp8x4_e5m2 datatype
|
| 940 |
+
*
|
| 941 |
+
* \details This structure implements the datatype for handling four
|
| 942 |
+
* \p fp8 floating-point numbers of \p e5m2 kind each:
|
| 943 |
+
* with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits.
|
| 944 |
+
*
|
| 945 |
+
* The structure implements converting constructors and operators.
|
| 946 |
+
*/
|
| 947 |
+
struct __CUDA_ALIGN__(4) __nv_fp8x4_e5m2 {
  public:
    /**
     * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
     * Storage variable contains the vector of four \p fp8 floating-point data
     * values.
     */
    __nv_fp8x4_storage_t __x;

    /**
     * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
     * Constructor by default.
     */
#if defined(__CPP_VERSION_AT_LEAST_11_FP8)
    __nv_fp8x4_e5m2() = default;
#else
    __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2() {}
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */

#if !defined(__CUDA_NO_FP8_CONVERSIONS__)

    /* Construct from wider types */
    /* Each constructor narrows two lanes at a time into fp8x2 halves and
     * packs them: the "lo" pair lands in the low 16 bits of __x. */

    /**
     * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
     * Constructor from a pair of \p __half2 data type values,
     * relies on \p __NV_SATFINITE behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __half2 flo,
                                                     const __half2 fhi) {
        const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2(
            static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E5M2);
        const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2(
            static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E5M2);
        __x = __internal_pack_u16x2_to_u32(rlo, rhi);
    }
    /**
     * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
     * Constructor from a pair of \p __nv_bfloat162 data type values,
     * relies on \p __NV_SATFINITE behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __nv_bfloat162 flo,
                                                     const __nv_bfloat162 fhi) {
        const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2(
            static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E5M2);
        const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2(
            static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E5M2);
        __x = __internal_pack_u16x2_to_u32(rlo, rhi);
    }
    /**
     * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
     * Constructor from \p float4 vector data type,
     * relies on \p __NV_SATFINITE behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const float4 f) {
        const float2 flo = {f.x, f.y};
        const float2 fhi = {f.z, f.w};
        const __nv_fp8x2_storage_t rlo =
            __nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2);
        const __nv_fp8x2_storage_t rhi =
            __nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2);
        __x = __internal_pack_u16x2_to_u32(rlo, rhi);
    }
    /**
     * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
     * Constructor from \p double4 vector data type,
     * relies on \p __NV_SATFINITE behavior for out-of-range values.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const double4 f) {
        const double2 flo = {f.x, f.y};
        const double2 fhi = {f.z, f.w};
        const __nv_fp8x2_storage_t rlo =
            __nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2);
        const __nv_fp8x2_storage_t rhi =
            __nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2);
        __x = __internal_pack_u16x2_to_u32(rlo, rhi);
    }

#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
    /* Widening converts */

    /**
     * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT
     * Conversion operator to \p float4 vector data type.
     */
    explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const {
        // unpack the two fp8x2 halves, widen each, and reassemble in order
        const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x);
        const __nv_fp8x2_storage_t shi =
            static_cast<__nv_fp8x2_storage_t>(__x >> 16U);
        float2 rlo = __internal_halfraw2_to_float2(
            __nv_cvt_fp8x2_to_halfraw2(slo, __NV_E5M2));
        float2 rhi = __internal_halfraw2_to_float2(
            __nv_cvt_fp8x2_to_halfraw2(shi, __NV_E5M2));
        float4 res = {rlo.x, rlo.y, rhi.x, rhi.y};
        return res;
    }
#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
};
|
| 1046 |
+
|
| 1047 |
+
/**
|
| 1048 |
+
* \defgroup CUDA_MATH_FP8_E4M3_STRUCT C++ struct for handling fp8 data type of e4m3 kind.
|
| 1049 |
+
* \ingroup CUDA_MATH_INTRINSIC_FP8
|
| 1050 |
+
*/
|
| 1051 |
+
|
| 1052 |
+
/**
|
| 1053 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1054 |
+
* \brief __nv_fp8_e4m3 datatype
|
| 1055 |
+
*
|
| 1056 |
+
* \details This structure implements the datatype for storing
|
| 1057 |
+
* \p fp8 floating-point numbers of \p e4m3 kind:
|
| 1058 |
+
* with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
|
| 1059 |
+
* The encoding doesn't support Infinity.
|
| 1060 |
+
* NaNs are limited to 0x7F and 0xFF values.
|
| 1061 |
+
*
|
| 1062 |
+
* The structure implements converting constructors and operators.
|
| 1063 |
+
*/
|
| 1064 |
+
struct __CUDA_ALIGN__(1) __nv_fp8_e4m3 {
|
| 1065 |
+
public:
|
| 1066 |
+
/**
|
| 1067 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1068 |
+
* Storage variable contains the \p fp8 floating-point data.
|
| 1069 |
+
*/
|
| 1070 |
+
__nv_fp8_storage_t __x;
|
| 1071 |
+
|
| 1072 |
+
/**
|
| 1073 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1074 |
+
* Constructor by default.
|
| 1075 |
+
*/
|
| 1076 |
+
#if defined(__CPP_VERSION_AT_LEAST_11_FP8)
|
| 1077 |
+
__nv_fp8_e4m3() = default;
|
| 1078 |
+
#else
|
| 1079 |
+
__CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3() {}
|
| 1080 |
+
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
|
| 1081 |
+
|
| 1082 |
+
#if !defined(__CUDA_NO_FP8_CONVERSIONS__)
|
| 1083 |
+
|
| 1084 |
+
/* Construct from wider FP types */
|
| 1085 |
+
/* Note we do avoid constructor init-list because of special host/device
|
| 1086 |
+
* compilation rules */
|
| 1087 |
+
|
| 1088 |
+
/**
|
| 1089 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1090 |
+
* Constructor from \p __half data type, relies on \p __NV_SATFINITE
|
| 1091 |
+
* behavior for out-of-range values.
|
| 1092 |
+
*/
|
| 1093 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __half f) {
|
| 1094 |
+
__x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f),
|
| 1095 |
+
__NV_SATFINITE, __NV_E4M3);
|
| 1096 |
+
}
|
| 1097 |
+
/**
|
| 1098 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1099 |
+
* Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE
|
| 1100 |
+
* behavior for out-of-range values.
|
| 1101 |
+
*/
|
| 1102 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __nv_bfloat16 f) {
|
| 1103 |
+
__x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f),
|
| 1104 |
+
__NV_SATFINITE, __NV_E4M3);
|
| 1105 |
+
}
|
| 1106 |
+
/**
|
| 1107 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1108 |
+
* Constructor from \p float data type, relies on \p __NV_SATFINITE behavior
|
| 1109 |
+
* for out-of-range values.
|
| 1110 |
+
*/
|
| 1111 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const float f) {
|
| 1112 |
+
__x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E4M3);
|
| 1113 |
+
}
|
| 1114 |
+
/**
|
| 1115 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1116 |
+
* Constructor from \p double data type, relies on \p __NV_SATFINITE
|
| 1117 |
+
* behavior for out-of-range values.
|
| 1118 |
+
*/
|
| 1119 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const double f) {
|
| 1120 |
+
__x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E4M3);
|
| 1121 |
+
}
|
| 1122 |
+
|
| 1123 |
+
/* Converts from integral */
|
| 1124 |
+
|
| 1125 |
+
/**
|
| 1126 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1127 |
+
* Constructor from \p unsigned \p short \p int data type, relies on \p
|
| 1128 |
+
* __NV_SATFINITE behavior for out-of-range values.
|
| 1129 |
+
*/
|
| 1130 |
+
explicit __CUDA_HOSTDEVICE_FP8__
|
| 1131 |
+
__nv_fp8_e4m3(const unsigned short int val) {
|
| 1132 |
+
__x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
|
| 1133 |
+
}
|
| 1134 |
+
/**
|
| 1135 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1136 |
+
* Constructor from \p unsigned \p int data type, relies on \p
|
| 1137 |
+
* __NV_SATFINITE behavior for out-of-range values.
|
| 1138 |
+
*/
|
| 1139 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const unsigned int val) {
|
| 1140 |
+
__x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
|
| 1141 |
+
}
|
| 1142 |
+
/**
|
| 1143 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1144 |
+
* Constructor from \p unsigned \p long \p long \p int data type, relies on
|
| 1145 |
+
* \p __NV_SATFINITE behavior for out-of-range values.
|
| 1146 |
+
*/
|
| 1147 |
+
explicit __CUDA_HOSTDEVICE_FP8__
|
| 1148 |
+
__nv_fp8_e4m3(const unsigned long long int val) {
|
| 1149 |
+
__x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
|
| 1150 |
+
}
|
| 1151 |
+
|
| 1152 |
+
/**
|
| 1153 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1154 |
+
* Constructor from \p short \p int data type, relies on \p
|
| 1155 |
+
* __NV_SATFINITE behavior for out-of-range values.
|
| 1156 |
+
*/
|
| 1157 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const short int val) {
|
| 1158 |
+
__x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
|
| 1159 |
+
}
|
| 1160 |
+
/**
|
| 1161 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1162 |
+
* Constructor from \p int data type, relies on \p __NV_SATFINITE behavior
|
| 1163 |
+
* for out-of-range values.
|
| 1164 |
+
*/
|
| 1165 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const int val) {
|
| 1166 |
+
__x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
|
| 1167 |
+
}
|
| 1168 |
+
/**
|
| 1169 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1170 |
+
* Constructor from \p long \p long \p int data type, relies on \p
|
| 1171 |
+
* __NV_SATFINITE behavior for out-of-range values.
|
| 1172 |
+
*/
|
| 1173 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const long long int val) {
|
| 1174 |
+
__x = static_cast<__nv_fp8_e4m3>(static_cast<float>(val)).__x;
|
| 1175 |
+
}
|
| 1176 |
+
|
| 1177 |
+
#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
|
| 1178 |
+
/* Widening FP converts */
|
| 1179 |
+
/**
|
| 1180 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1181 |
+
* Conversion operator to \p __half data type.
|
| 1182 |
+
*/
|
| 1183 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const {
|
| 1184 |
+
return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E4M3));
|
| 1185 |
+
}
|
| 1186 |
+
/**
|
| 1187 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1188 |
+
* Conversion operator to \p float data type.
|
| 1189 |
+
*/
|
| 1190 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator float() const {
|
| 1191 |
+
return __internal_halfraw_to_float(
|
| 1192 |
+
__nv_cvt_fp8_to_halfraw(__x, __NV_E4M3));
|
| 1193 |
+
}
|
| 1194 |
+
/**
|
| 1195 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1196 |
+
* Conversion operator to \p __nv_bfloat16 data type.
|
| 1197 |
+
*/
|
| 1198 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const {
|
| 1199 |
+
return static_cast<__nv_bfloat16>(
|
| 1200 |
+
__internal_float_to_bf16raw_rz(float(*this)));
|
| 1201 |
+
}
|
| 1202 |
+
/**
|
| 1203 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1204 |
+
* Conversion operator to \p double data type.
|
| 1205 |
+
*/
|
| 1206 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator double() const {
|
| 1207 |
+
return static_cast<double>(float(*this));
|
| 1208 |
+
}
|
| 1209 |
+
|
| 1210 |
+
/* Convert to integral */
|
| 1211 |
+
|
| 1212 |
+
/**
|
| 1213 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1214 |
+
* Conversion operator to \p unsigned \p char data type.
|
| 1215 |
+
* Clamps negative and too large inputs to the output range.
|
| 1216 |
+
* \p NaN inputs convert to \p zero.
|
| 1217 |
+
*/
|
| 1218 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const {
|
| 1219 |
+
unsigned char i;
|
| 1220 |
+
const float f = float(*this);
|
| 1221 |
+
const unsigned char max_val = 0xFFU;
|
| 1222 |
+
const unsigned char min_val = 0U;
|
| 1223 |
+
const unsigned char bits = (*this).__x;
|
| 1224 |
+
// saturation fixup
|
| 1225 |
+
if ((bits & 0x7FU) == 0x7FU) {
|
| 1226 |
+
// NaN
|
| 1227 |
+
i = 0;
|
| 1228 |
+
} else if (f > static_cast<float>(max_val)) {
|
| 1229 |
+
// saturate maximum
|
| 1230 |
+
i = max_val;
|
| 1231 |
+
} else if (f < static_cast<float>(min_val)) {
|
| 1232 |
+
// saturate minimum
|
| 1233 |
+
i = min_val;
|
| 1234 |
+
} else {
|
| 1235 |
+
// normal value
|
| 1236 |
+
i = static_cast<unsigned char>(f);
|
| 1237 |
+
}
|
| 1238 |
+
return i;
|
| 1239 |
+
}
|
| 1240 |
+
|
| 1241 |
+
/**
|
| 1242 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1243 |
+
* Conversion operator to \p unsigned \p short \p int data type.
|
| 1244 |
+
* Clamps negative inputs to zero.
|
| 1245 |
+
* \p NaN inputs convert to \p zero.
|
| 1246 |
+
*/
|
| 1247 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const {
|
| 1248 |
+
return __half2ushort_rz(__half(*this));
|
| 1249 |
+
}
|
| 1250 |
+
/**
|
| 1251 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1252 |
+
* Conversion operator to \p unsigned \p int data type.
|
| 1253 |
+
* Clamps negative inputs to zero.
|
| 1254 |
+
* \p NaN inputs convert to \p zero.
|
| 1255 |
+
*/
|
| 1256 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const {
|
| 1257 |
+
return __half2uint_rz(__half(*this));
|
| 1258 |
+
}
|
| 1259 |
+
/**
|
| 1260 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1261 |
+
* Conversion operator to \p unsigned \p long \p long \p int data type.
|
| 1262 |
+
* Clamps negative inputs to zero.
|
| 1263 |
+
* \p NaN inputs convert to \p 0x8000000000000000ULL.
|
| 1264 |
+
*/
|
| 1265 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const {
|
| 1266 |
+
return __half2ull_rz(__half(*this));
|
| 1267 |
+
}
|
| 1268 |
+
|
| 1269 |
+
/**
|
| 1270 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1271 |
+
* Conversion operator to \p signed \p char data type.
|
| 1272 |
+
* Clamps too large inputs to the output range.
|
| 1273 |
+
* \p NaN inputs convert to \p zero.
|
| 1274 |
+
*/
|
| 1275 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const {
|
| 1276 |
+
signed char i;
|
| 1277 |
+
const float f = float(*this);
|
| 1278 |
+
const signed char max_val = (signed char)0x7FU;
|
| 1279 |
+
const signed char min_val = (signed char)0x80U;
|
| 1280 |
+
const unsigned char bits = (*this).__x;
|
| 1281 |
+
// saturation fixup
|
| 1282 |
+
if ((bits & 0x7FU) == 0x7FU) {
|
| 1283 |
+
// NaN
|
| 1284 |
+
i = 0;
|
| 1285 |
+
} else if (f > static_cast<float>(max_val)) {
|
| 1286 |
+
// saturate maximum
|
| 1287 |
+
i = max_val;
|
| 1288 |
+
} else if (f < static_cast<float>(min_val)) {
|
| 1289 |
+
// saturate minimum
|
| 1290 |
+
i = min_val;
|
| 1291 |
+
} else {
|
| 1292 |
+
// normal value
|
| 1293 |
+
i = static_cast<signed char>(f);
|
| 1294 |
+
}
|
| 1295 |
+
return i;
|
| 1296 |
+
}
|
| 1297 |
+
/**
|
| 1298 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1299 |
+
* Conversion operator to \p short \p int data type.
|
| 1300 |
+
* \p NaN inputs convert to \p zero.
|
| 1301 |
+
*/
|
| 1302 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const {
|
| 1303 |
+
return __half2short_rz(__half(*this));
|
| 1304 |
+
}
|
| 1305 |
+
/**
|
| 1306 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1307 |
+
* Conversion operator to \p int data type.
|
| 1308 |
+
* \p NaN inputs convert to \p zero.
|
| 1309 |
+
*/
|
| 1310 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator int() const {
|
| 1311 |
+
return __half2int_rz(__half(*this));
|
| 1312 |
+
}
|
| 1313 |
+
/**
|
| 1314 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1315 |
+
* Conversion operator to \p long \p long \p int data type.
|
| 1316 |
+
* \p NaN inputs convert to \p 0x8000000000000000LL.
|
| 1317 |
+
*/
|
| 1318 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const {
|
| 1319 |
+
return __half2ll_rz(__half(*this));
|
| 1320 |
+
}
|
| 1321 |
+
|
| 1322 |
+
/**
|
| 1323 |
+
* \ingroup CUDA_MATH_FP8_E4M3_STRUCT
|
| 1324 |
+
* Conversion operator to \p bool data type.
|
| 1325 |
+
* +0 and -0 inputs convert to \p false.
|
| 1326 |
+
* Non-zero inputs convert to \p true.
|
| 1327 |
+
*/
|
| 1328 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const {
|
| 1329 |
+
return (__x & 0x7FU) != 0U;
|
| 1330 |
+
}
|
| 1331 |
+
#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
|
| 1332 |
+
#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
|
| 1333 |
+
};
|
| 1334 |
+
|
| 1335 |
+
/**
|
| 1336 |
+
* \defgroup CUDA_MATH_FP8X2_E4M3_STRUCT C++ struct for handling vector type of two fp8 values of e4m3 kind.
|
| 1337 |
+
* \ingroup CUDA_MATH_INTRINSIC_FP8
|
| 1338 |
+
*/
|
| 1339 |
+
|
| 1340 |
+
/**
|
| 1341 |
+
* \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
|
| 1342 |
+
* \brief __nv_fp8x2_e4m3 datatype
|
| 1343 |
+
*
|
| 1344 |
+
* \details This structure implements the datatype for storage
|
| 1345 |
+
* and operations on the vector of two \p fp8 values of \p e4m3 kind each:
|
| 1346 |
+
* with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
|
| 1347 |
+
* The encoding doesn't support Infinity.
|
| 1348 |
+
* NaNs are limited to 0x7F and 0xFF values.
|
| 1349 |
+
*/
|
| 1350 |
+
struct __CUDA_ALIGN__(2) __nv_fp8x2_e4m3 {
|
| 1351 |
+
public:
|
| 1352 |
+
/**
|
| 1353 |
+
* \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
|
| 1354 |
+
* Storage variable contains the vector of two \p fp8 floating-point data
|
| 1355 |
+
* values.
|
| 1356 |
+
*/
|
| 1357 |
+
__nv_fp8x2_storage_t __x;
|
| 1358 |
+
|
| 1359 |
+
/**
|
| 1360 |
+
* \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
|
| 1361 |
+
* Constructor by default.
|
| 1362 |
+
*/
|
| 1363 |
+
#if defined(__CPP_VERSION_AT_LEAST_11_FP8)
|
| 1364 |
+
__nv_fp8x2_e4m3() = default;
|
| 1365 |
+
#else
|
| 1366 |
+
__CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3() {}
|
| 1367 |
+
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
|
| 1368 |
+
|
| 1369 |
+
#if !defined(__CUDA_NO_FP8_CONVERSIONS__)
|
| 1370 |
+
|
| 1371 |
+
/* Construct from wider types */
|
| 1372 |
+
|
| 1373 |
+
/**
|
| 1374 |
+
* \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
|
| 1375 |
+
* Constructor from \p __half2 data type, relies on \p __NV_SATFINITE
|
| 1376 |
+
* behavior for out-of-range values.
|
| 1377 |
+
*/
|
| 1378 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __half2 f) {
|
| 1379 |
+
__x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f),
|
| 1380 |
+
__NV_SATFINITE, __NV_E4M3);
|
| 1381 |
+
}
|
| 1382 |
+
/**
|
| 1383 |
+
* \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
|
| 1384 |
+
* Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE
|
| 1385 |
+
* behavior for out-of-range values.
|
| 1386 |
+
*/
|
| 1387 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __nv_bfloat162 f) {
|
| 1388 |
+
__x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f),
|
| 1389 |
+
__NV_SATFINITE, __NV_E4M3);
|
| 1390 |
+
}
|
| 1391 |
+
/**
|
| 1392 |
+
* \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
|
| 1393 |
+
* Constructor from \p float2 data type, relies on \p __NV_SATFINITE
|
| 1394 |
+
* behavior for out-of-range values.
|
| 1395 |
+
*/
|
| 1396 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const float2 f) {
|
| 1397 |
+
__x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3);
|
| 1398 |
+
}
|
| 1399 |
+
/**
|
| 1400 |
+
* \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
|
| 1401 |
+
* Constructor from \p double2 data type, relies on \p __NV_SATFINITE
|
| 1402 |
+
* behavior for out-of-range values.
|
| 1403 |
+
*/
|
| 1404 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const double2 f) {
|
| 1405 |
+
__x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3);
|
| 1406 |
+
}
|
| 1407 |
+
|
| 1408 |
+
#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
|
| 1409 |
+
/* Widening converts */
|
| 1410 |
+
/**
|
| 1411 |
+
* \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
|
| 1412 |
+
* Conversion operator to \p __half2 data type.
|
| 1413 |
+
*/
|
| 1414 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const {
|
| 1415 |
+
return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3));
|
| 1416 |
+
}
|
| 1417 |
+
/**
|
| 1418 |
+
* \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT
|
| 1419 |
+
* Conversion operator to \p float2 data type.
|
| 1420 |
+
*/
|
| 1421 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const {
|
| 1422 |
+
return __internal_halfraw2_to_float2(
|
| 1423 |
+
__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3));
|
| 1424 |
+
}
|
| 1425 |
+
#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
|
| 1426 |
+
#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
|
| 1427 |
+
};
|
| 1428 |
+
|
| 1429 |
+
/**
|
| 1430 |
+
* \defgroup CUDA_MATH_FP8X4_E4M3_STRUCT C++ struct for handling vector type of four fp8 values of e4m3 kind.
|
| 1431 |
+
* \ingroup CUDA_MATH_INTRINSIC_FP8
|
| 1432 |
+
*/
|
| 1433 |
+
|
| 1434 |
+
/**
|
| 1435 |
+
* \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
|
| 1436 |
+
* \brief __nv_fp8x4_e4m3 datatype
|
| 1437 |
+
*
|
| 1438 |
+
* \details This structure implements the datatype for storage
|
| 1439 |
+
* and operations on the vector of four \p fp8 values of \p e4m3 kind each:
|
| 1440 |
+
* with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits.
|
| 1441 |
+
* The encoding doesn't support Infinity.
|
| 1442 |
+
* NaNs are limited to 0x7F and 0xFF values.
|
| 1443 |
+
*/
|
| 1444 |
+
struct __CUDA_ALIGN__(4) __nv_fp8x4_e4m3 {
|
| 1445 |
+
public:
|
| 1446 |
+
/**
|
| 1447 |
+
* \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
|
| 1448 |
+
* Storage variable contains the vector of four \p fp8 floating-point data
|
| 1449 |
+
* values.
|
| 1450 |
+
*/
|
| 1451 |
+
__nv_fp8x4_storage_t __x;
|
| 1452 |
+
|
| 1453 |
+
/**
|
| 1454 |
+
* \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
|
| 1455 |
+
* Constructor by default.
|
| 1456 |
+
*/
|
| 1457 |
+
#if defined(__CPP_VERSION_AT_LEAST_11_FP8)
|
| 1458 |
+
__nv_fp8x4_e4m3() = default;
|
| 1459 |
+
#else
|
| 1460 |
+
__CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3() {}
|
| 1461 |
+
#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */
|
| 1462 |
+
|
| 1463 |
+
#if !defined(__CUDA_NO_FP8_CONVERSIONS__)
|
| 1464 |
+
|
| 1465 |
+
/* Construct from wider types */
|
| 1466 |
+
|
| 1467 |
+
/**
|
| 1468 |
+
* \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
|
| 1469 |
+
* Constructor from a pair of \p __half2 data type values,
|
| 1470 |
+
* relies on \p __NV_SATFINITE behavior for out-of-range values.
|
| 1471 |
+
*/
|
| 1472 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __half2 flo,
|
| 1473 |
+
const __half2 fhi) {
|
| 1474 |
+
const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2(
|
| 1475 |
+
static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E4M3);
|
| 1476 |
+
const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2(
|
| 1477 |
+
static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E4M3);
|
| 1478 |
+
__x = __internal_pack_u16x2_to_u32(rlo, rhi);
|
| 1479 |
+
}
|
| 1480 |
+
/**
|
| 1481 |
+
* \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
|
| 1482 |
+
* Constructor from a pair of \p __nv_bfloat162 data type values,
|
| 1483 |
+
* relies on \p __NV_SATFINITE behavior for out-of-range values.
|
| 1484 |
+
*/
|
| 1485 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __nv_bfloat162 flo,
|
| 1486 |
+
const __nv_bfloat162 fhi) {
|
| 1487 |
+
const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2(
|
| 1488 |
+
static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E4M3);
|
| 1489 |
+
const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2(
|
| 1490 |
+
static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E4M3);
|
| 1491 |
+
__x = __internal_pack_u16x2_to_u32(rlo, rhi);
|
| 1492 |
+
}
|
| 1493 |
+
/**
|
| 1494 |
+
* \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
|
| 1495 |
+
* Constructor from \p float4 vector data type,
|
| 1496 |
+
* relies on \p __NV_SATFINITE behavior for out-of-range values.
|
| 1497 |
+
*/
|
| 1498 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const float4 f) {
|
| 1499 |
+
const float2 flo = {f.x, f.y};
|
| 1500 |
+
const float2 fhi = {f.z, f.w};
|
| 1501 |
+
const __nv_fp8x2_storage_t rlo =
|
| 1502 |
+
__nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3);
|
| 1503 |
+
const __nv_fp8x2_storage_t rhi =
|
| 1504 |
+
__nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3);
|
| 1505 |
+
__x = __internal_pack_u16x2_to_u32(rlo, rhi);
|
| 1506 |
+
}
|
| 1507 |
+
/**
|
| 1508 |
+
* \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
|
| 1509 |
+
* Constructor from \p double4 vector data type,
|
| 1510 |
+
* relies on \p __NV_SATFINITE behavior for out-of-range values.
|
| 1511 |
+
*/
|
| 1512 |
+
explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const double4 f) {
|
| 1513 |
+
const double2 flo = {f.x, f.y};
|
| 1514 |
+
const double2 fhi = {f.z, f.w};
|
| 1515 |
+
const __nv_fp8x2_storage_t rlo =
|
| 1516 |
+
__nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3);
|
| 1517 |
+
const __nv_fp8x2_storage_t rhi =
|
| 1518 |
+
__nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3);
|
| 1519 |
+
__x = __internal_pack_u16x2_to_u32(rlo, rhi);
|
| 1520 |
+
}
|
| 1521 |
+
|
| 1522 |
+
#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__)
|
| 1523 |
+
/* Widening converts */
|
| 1524 |
+
|
| 1525 |
+
/**
|
| 1526 |
+
* \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT
|
| 1527 |
+
* Conversion operator to \p float4 vector data type.
|
| 1528 |
+
*/
|
| 1529 |
+
explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const {
|
| 1530 |
+
const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x);
|
| 1531 |
+
const __nv_fp8x2_storage_t shi =
|
| 1532 |
+
static_cast<__nv_fp8x2_storage_t>(__x >> 16U);
|
| 1533 |
+
float2 rlo = __internal_halfraw2_to_float2(
|
| 1534 |
+
__nv_cvt_fp8x2_to_halfraw2(slo, __NV_E4M3));
|
| 1535 |
+
float2 rhi = __internal_halfraw2_to_float2(
|
| 1536 |
+
__nv_cvt_fp8x2_to_halfraw2(shi, __NV_E4M3));
|
| 1537 |
+
float4 res = {rlo.x, rlo.y, rhi.x, rhi.y};
|
| 1538 |
+
return res;
|
| 1539 |
+
}
|
| 1540 |
+
#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */
|
| 1541 |
+
#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */
|
| 1542 |
+
};
|
| 1543 |
+
|
| 1544 |
+
#endif /* defined(__cplusplus) */
|
| 1545 |
+
|
| 1546 |
+
#endif /* end of include guard: __CUDA_FP8_HPP__ */
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_gl_interop.h
ADDED
|
@@ -0,0 +1,514 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_GL_INTEROP_H__)
|
| 51 |
+
#define __CUDA_GL_INTEROP_H__
|
| 52 |
+
|
| 53 |
+
#include "cuda_runtime_api.h"
|
| 54 |
+
|
| 55 |
+
#if defined(__APPLE__)
|
| 56 |
+
|
| 57 |
+
#include <OpenGL/gl.h>
|
| 58 |
+
|
| 59 |
+
#else /* __APPLE__ */
|
| 60 |
+
|
| 61 |
+
#if defined(__arm__) || defined(__aarch64__)
|
| 62 |
+
#ifndef GL_VERSION
|
| 63 |
+
#error Please include the appropriate gl headers before including cuda_gl_interop.h
|
| 64 |
+
#endif
|
| 65 |
+
#else
|
| 66 |
+
#include <GL/gl.h>
|
| 67 |
+
#endif
|
| 68 |
+
|
| 69 |
+
#endif /* __APPLE__ */
|
| 70 |
+
|
| 71 |
+
/** \cond impl_private */
|
| 72 |
+
#if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED)
|
| 73 |
+
#define __CUDA_DEPRECATED
|
| 74 |
+
#elif defined(_MSC_VER)
|
| 75 |
+
#define __CUDA_DEPRECATED __declspec(deprecated)
|
| 76 |
+
#elif defined(__GNUC__)
|
| 77 |
+
#define __CUDA_DEPRECATED __attribute__((deprecated))
|
| 78 |
+
#else
|
| 79 |
+
#define __CUDA_DEPRECATED
|
| 80 |
+
#endif
|
| 81 |
+
/** \endcond impl_private */
|
| 82 |
+
|
| 83 |
+
#if defined(__cplusplus)
|
| 84 |
+
extern "C" {
|
| 85 |
+
#endif /* __cplusplus */
|
| 86 |
+
|
| 87 |
+
/**
|
| 88 |
+
* \addtogroup CUDART_OPENGL OpenGL Interoperability
|
| 89 |
+
* This section describes the OpenGL interoperability functions of the CUDA
|
| 90 |
+
* runtime application programming interface. Note that mapping of OpenGL
|
| 91 |
+
* resources is performed with the graphics API agnostic, resource mapping
|
| 92 |
+
* interface described in \ref CUDART_INTEROP "Graphics Interopability".
|
| 93 |
+
*
|
| 94 |
+
* @{
|
| 95 |
+
*/
|
| 96 |
+
|
| 97 |
+
/**
|
| 98 |
+
* CUDA devices corresponding to the current OpenGL context
|
| 99 |
+
*/
|
| 100 |
+
enum cudaGLDeviceList
|
| 101 |
+
{
|
| 102 |
+
cudaGLDeviceListAll = 1, /**< The CUDA devices for all GPUs used by the current OpenGL context */
|
| 103 |
+
cudaGLDeviceListCurrentFrame = 2, /**< The CUDA devices for the GPUs used by the current OpenGL context in its currently rendering frame */
|
| 104 |
+
cudaGLDeviceListNextFrame = 3 /**< The CUDA devices for the GPUs to be used by the current OpenGL context in the next frame */
|
| 105 |
+
};
|
| 106 |
+
|
| 107 |
+
/**
|
| 108 |
+
* \brief Gets the CUDA devices associated with the current OpenGL context
|
| 109 |
+
*
|
| 110 |
+
* Returns in \p *pCudaDeviceCount the number of CUDA-compatible devices
|
| 111 |
+
* corresponding to the current OpenGL context. Also returns in \p *pCudaDevices
|
| 112 |
+
* at most \p cudaDeviceCount of the CUDA-compatible devices corresponding to
|
| 113 |
+
* the current OpenGL context. If any of the GPUs being used by the current OpenGL
|
| 114 |
+
* context are not CUDA capable then the call will return ::cudaErrorNoDevice.
|
| 115 |
+
*
|
| 116 |
+
* \param pCudaDeviceCount - Returned number of CUDA devices corresponding to the
|
| 117 |
+
* current OpenGL context
|
| 118 |
+
* \param pCudaDevices - Returned CUDA devices corresponding to the current
|
| 119 |
+
* OpenGL context
|
| 120 |
+
* \param cudaDeviceCount - The size of the output device array \p pCudaDevices
|
| 121 |
+
* \param deviceList - The set of devices to return. This set may be
|
| 122 |
+
* ::cudaGLDeviceListAll for all devices,
|
| 123 |
+
* ::cudaGLDeviceListCurrentFrame for the devices used to
|
| 124 |
+
* render the current frame (in SLI), or
|
| 125 |
+
* ::cudaGLDeviceListNextFrame for the devices used to
|
| 126 |
+
* render the next frame (in SLI).
|
| 127 |
+
*
|
| 128 |
+
* \return
|
| 129 |
+
* ::cudaSuccess,
|
| 130 |
+
* ::cudaErrorNoDevice,
|
| 131 |
+
* ::cudaErrorInvalidGraphicsContext,
|
| 132 |
+
* ::cudaErrorOperatingSystem,
|
| 133 |
+
* ::cudaErrorUnknown
|
| 134 |
+
*
|
| 135 |
+
* \note This function is not supported on Mac OS X.
|
| 136 |
+
* \notefnerr
|
| 137 |
+
*
|
| 138 |
+
* \sa
|
| 139 |
+
* ::cudaGraphicsUnregisterResource,
|
| 140 |
+
* ::cudaGraphicsMapResources,
|
| 141 |
+
* ::cudaGraphicsSubResourceGetMappedArray,
|
| 142 |
+
* ::cudaGraphicsResourceGetMappedPointer,
|
| 143 |
+
* ::cuGLGetDevices
|
| 144 |
+
*/
|
| 145 |
+
extern __host__ cudaError_t CUDARTAPI cudaGLGetDevices(unsigned int *pCudaDeviceCount, int *pCudaDevices, unsigned int cudaDeviceCount, enum cudaGLDeviceList deviceList);
|
| 146 |
+
|
| 147 |
+
/**
|
| 148 |
+
* \brief Register an OpenGL texture or renderbuffer object
|
| 149 |
+
*
|
| 150 |
+
* Registers the texture or renderbuffer object specified by \p image for access by CUDA.
|
| 151 |
+
* A handle to the registered object is returned as \p resource.
|
| 152 |
+
*
|
| 153 |
+
* \p target must match the type of the object, and must be one of ::GL_TEXTURE_2D,
|
| 154 |
+
* ::GL_TEXTURE_RECTANGLE, ::GL_TEXTURE_CUBE_MAP, ::GL_TEXTURE_3D, ::GL_TEXTURE_2D_ARRAY,
|
| 155 |
+
* or ::GL_RENDERBUFFER.
|
| 156 |
+
*
|
| 157 |
+
* The register flags \p flags specify the intended usage, as follows:
|
| 158 |
+
* - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
|
| 159 |
+
* resource will be used. It is therefore assumed that this resource will be
|
| 160 |
+
* read from and written to by CUDA. This is the default value.
|
| 161 |
+
* - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
|
| 162 |
+
* will not write to this resource.
|
| 163 |
+
* - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
|
| 164 |
+
* CUDA will not read from this resource and will write over the
|
| 165 |
+
* entire contents of the resource, so none of the data previously
|
| 166 |
+
* stored in the resource will be preserved.
|
| 167 |
+
* - ::cudaGraphicsRegisterFlagsSurfaceLoadStore: Specifies that CUDA will
|
| 168 |
+
* bind this resource to a surface reference.
|
| 169 |
+
* - ::cudaGraphicsRegisterFlagsTextureGather: Specifies that CUDA will perform
|
| 170 |
+
* texture gather operations on this resource.
|
| 171 |
+
*
|
| 172 |
+
* The following image formats are supported. For brevity's sake, the list is abbreviated.
|
| 173 |
+
* For ex., {GL_R, GL_RG} X {8, 16} would expand to the following 4 formats
|
| 174 |
+
* {GL_R8, GL_R16, GL_RG8, GL_RG16} :
|
| 175 |
+
* - GL_RED, GL_RG, GL_RGBA, GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY
|
| 176 |
+
* - {GL_R, GL_RG, GL_RGBA} X {8, 16, 16F, 32F, 8UI, 16UI, 32UI, 8I, 16I, 32I}
|
| 177 |
+
* - {GL_LUMINANCE, GL_ALPHA, GL_LUMINANCE_ALPHA, GL_INTENSITY} X
|
| 178 |
+
* {8, 16, 16F_ARB, 32F_ARB, 8UI_EXT, 16UI_EXT, 32UI_EXT, 8I_EXT, 16I_EXT, 32I_EXT}
|
| 179 |
+
*
|
| 180 |
+
* The following image classes are currently disallowed:
|
| 181 |
+
* - Textures with borders
|
| 182 |
+
* - Multisampled renderbuffers
|
| 183 |
+
*
|
| 184 |
+
* \param resource - Pointer to the returned object handle
|
| 185 |
+
* \param image - name of texture or renderbuffer object to be registered
|
| 186 |
+
* \param target - Identifies the type of object specified by \p image
|
| 187 |
+
* \param flags - Register flags
|
| 188 |
+
*
|
| 189 |
+
* \return
|
| 190 |
+
* ::cudaSuccess,
|
| 191 |
+
* ::cudaErrorInvalidDevice,
|
| 192 |
+
* ::cudaErrorInvalidValue,
|
| 193 |
+
* ::cudaErrorInvalidResourceHandle,
|
| 194 |
+
* ::cudaErrorOperatingSystem,
|
| 195 |
+
* ::cudaErrorUnknown
|
| 196 |
+
* \notefnerr
|
| 197 |
+
*
|
| 198 |
+
* \sa
|
| 199 |
+
* ::cudaGraphicsUnregisterResource,
|
| 200 |
+
* ::cudaGraphicsMapResources,
|
| 201 |
+
* ::cudaGraphicsSubResourceGetMappedArray,
|
| 202 |
+
* ::cuGraphicsGLRegisterImage
|
| 203 |
+
*/
|
| 204 |
+
extern __host__ cudaError_t CUDARTAPI cudaGraphicsGLRegisterImage(struct cudaGraphicsResource **resource, GLuint image, GLenum target, unsigned int flags);
|
| 205 |
+
|
| 206 |
+
/**
|
| 207 |
+
* \brief Registers an OpenGL buffer object
|
| 208 |
+
*
|
| 209 |
+
* Registers the buffer object specified by \p buffer for access by
|
| 210 |
+
* CUDA. A handle to the registered object is returned as \p
|
| 211 |
+
* resource. The register flags \p flags specify the intended usage,
|
| 212 |
+
* as follows:
|
| 213 |
+
*
|
| 214 |
+
* - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
|
| 215 |
+
* resource will be used. It is therefore assumed that this resource will be
|
| 216 |
+
* read from and written to by CUDA. This is the default value.
|
| 217 |
+
* - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
|
| 218 |
+
* will not write to this resource.
|
| 219 |
+
* - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
|
| 220 |
+
* CUDA will not read from this resource and will write over the
|
| 221 |
+
* entire contents of the resource, so none of the data previously
|
| 222 |
+
* stored in the resource will be preserved.
|
| 223 |
+
*
|
| 224 |
+
* \param resource - Pointer to the returned object handle
|
| 225 |
+
* \param buffer - name of buffer object to be registered
|
| 226 |
+
* \param flags - Register flags
|
| 227 |
+
*
|
| 228 |
+
* \return
|
| 229 |
+
* ::cudaSuccess,
|
| 230 |
+
* ::cudaErrorInvalidDevice,
|
| 231 |
+
* ::cudaErrorInvalidValue,
|
| 232 |
+
* ::cudaErrorInvalidResourceHandle,
|
| 233 |
+
* ::cudaErrorOperatingSystem,
|
| 234 |
+
* ::cudaErrorUnknown
|
| 235 |
+
* \notefnerr
|
| 236 |
+
*
|
| 237 |
+
* \sa
|
| 238 |
+
* ::cudaGraphicsUnregisterResource,
|
| 239 |
+
* ::cudaGraphicsMapResources,
|
| 240 |
+
* ::cudaGraphicsResourceGetMappedPointer,
|
| 241 |
+
* ::cuGraphicsGLRegisterBuffer
|
| 242 |
+
*/
|
| 243 |
+
extern __host__ cudaError_t CUDARTAPI cudaGraphicsGLRegisterBuffer(struct cudaGraphicsResource **resource, GLuint buffer, unsigned int flags);
|
| 244 |
+
|
| 245 |
+
#ifdef _WIN32
|
| 246 |
+
#ifndef WGL_NV_gpu_affinity
|
| 247 |
+
typedef void* HGPUNV;
|
| 248 |
+
#endif
|
| 249 |
+
|
| 250 |
+
/**
|
| 251 |
+
* \brief Gets the CUDA device associated with hGpu
|
| 252 |
+
*
|
| 253 |
+
* Returns the CUDA device associated with a hGpu, if applicable.
|
| 254 |
+
*
|
| 255 |
+
* \param device - Returns the device associated with hGpu, or -1 if hGpu is
|
| 256 |
+
* not a compute device.
|
| 257 |
+
* \param hGpu - Handle to a GPU, as queried via WGL_NV_gpu_affinity
|
| 258 |
+
*
|
| 259 |
+
* \return
|
| 260 |
+
* ::cudaSuccess
|
| 261 |
+
* \notefnerr
|
| 262 |
+
*
|
| 263 |
+
* \sa
|
| 264 |
+
* ::WGL_NV_gpu_affinity,
|
| 265 |
+
* ::cuWGLGetDevice
|
| 266 |
+
*/
|
| 267 |
+
extern __host__ cudaError_t CUDARTAPI cudaWGLGetDevice(int *device, HGPUNV hGpu);
|
| 268 |
+
#endif
|
| 269 |
+
|
| 270 |
+
/** @} */ /* END CUDART_OPENGL */
|
| 271 |
+
|
| 272 |
+
/**
|
| 273 |
+
* \addtogroup CUDART_OPENGL_DEPRECATED OpenGL Interoperability [DEPRECATED]
|
| 274 |
+
* This section describes deprecated OpenGL interoperability functionality.
|
| 275 |
+
*
|
| 276 |
+
* @{
|
| 277 |
+
*/
|
| 278 |
+
|
| 279 |
+
/**
|
| 280 |
+
* CUDA GL Map Flags
|
| 281 |
+
*/
|
| 282 |
+
enum cudaGLMapFlags
|
| 283 |
+
{
|
| 284 |
+
cudaGLMapFlagsNone = 0, /**< Default; Assume resource can be read/written */
|
| 285 |
+
cudaGLMapFlagsReadOnly = 1, /**< CUDA kernels will not write to this resource */
|
| 286 |
+
cudaGLMapFlagsWriteDiscard = 2 /**< CUDA kernels will only write to and will not read from this resource */
|
| 287 |
+
};
|
| 288 |
+
|
| 289 |
+
/**
|
| 290 |
+
* \brief Sets a CUDA device to use OpenGL interoperability
|
| 291 |
+
*
|
| 292 |
+
* \deprecated This function is deprecated as of CUDA 5.0.
|
| 293 |
+
*
|
| 294 |
+
* This function is deprecated and should no longer be used. It is
|
| 295 |
+
* no longer necessary to associate a CUDA device with an OpenGL
|
| 296 |
+
* context in order to achieve maximum interoperability performance.
|
| 297 |
+
*
|
| 298 |
+
* This function will immediately initialize the primary context on
|
| 299 |
+
* \p device if needed.
|
| 300 |
+
*
|
| 301 |
+
* \param device - Device to use for OpenGL interoperability
|
| 302 |
+
*
|
| 303 |
+
* \return
|
| 304 |
+
* ::cudaSuccess,
|
| 305 |
+
* ::cudaErrorInvalidDevice,
|
| 306 |
+
* ::cudaErrorSetOnActiveProcess
|
| 307 |
+
* \notefnerr
|
| 308 |
+
*
|
| 309 |
+
* \sa ::cudaGraphicsGLRegisterBuffer, ::cudaGraphicsGLRegisterImage
|
| 310 |
+
*/
|
| 311 |
+
extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLSetGLDevice(int device);
|
| 312 |
+
|
| 313 |
+
/**
|
| 314 |
+
* \brief Registers a buffer object for access by CUDA
|
| 315 |
+
*
|
| 316 |
+
* \deprecated This function is deprecated as of CUDA 3.0.
|
| 317 |
+
*
|
| 318 |
+
* Registers the buffer object of ID \p bufObj for access by
|
| 319 |
+
* CUDA. This function must be called before CUDA can map the buffer
|
| 320 |
+
* object. The OpenGL context used to create the buffer, or another
|
| 321 |
+
* context from the same share group, must be bound to the current
|
| 322 |
+
* thread when this is called.
|
| 323 |
+
*
|
| 324 |
+
* \param bufObj - Buffer object ID to register
|
| 325 |
+
*
|
| 326 |
+
* \return
|
| 327 |
+
* ::cudaSuccess,
|
| 328 |
+
* ::cudaErrorInitializationError
|
| 329 |
+
* \notefnerr
|
| 330 |
+
*
|
| 331 |
+
* \sa ::cudaGraphicsGLRegisterBuffer
|
| 332 |
+
*/
|
| 333 |
+
extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLRegisterBufferObject(GLuint bufObj);
|
| 334 |
+
|
| 335 |
+
/**
|
| 336 |
+
* \brief Maps a buffer object for access by CUDA
|
| 337 |
+
*
|
| 338 |
+
* \deprecated This function is deprecated as of CUDA 3.0.
|
| 339 |
+
*
|
| 340 |
+
* Maps the buffer object of ID \p bufObj into the address space of
|
| 341 |
+
* CUDA and returns in \p *devPtr the base pointer of the resulting
|
| 342 |
+
* mapping. The buffer must have previously been registered by
|
| 343 |
+
* calling ::cudaGLRegisterBufferObject(). While a buffer is mapped
|
| 344 |
+
* by CUDA, any OpenGL operation which references the buffer will
|
| 345 |
+
* result in undefined behavior. The OpenGL context used to create
|
| 346 |
+
* the buffer, or another context from the same share group, must be
|
| 347 |
+
* bound to the current thread when this is called.
|
| 348 |
+
*
|
| 349 |
+
* All streams in the current thread are synchronized with the current
|
| 350 |
+
* GL context.
|
| 351 |
+
*
|
| 352 |
+
* \param devPtr - Returned device pointer to CUDA object
|
| 353 |
+
* \param bufObj - Buffer object ID to map
|
| 354 |
+
*
|
| 355 |
+
* \return
|
| 356 |
+
* ::cudaSuccess,
|
| 357 |
+
* ::cudaErrorMapBufferObjectFailed
|
| 358 |
+
* \notefnerr
|
| 359 |
+
*
|
| 360 |
+
* \sa ::cudaGraphicsMapResources
|
| 361 |
+
*/
|
| 362 |
+
extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLMapBufferObject(void **devPtr, GLuint bufObj);
|
| 363 |
+
|
| 364 |
+
/**
|
| 365 |
+
* \brief Unmaps a buffer object for access by CUDA
|
| 366 |
+
*
|
| 367 |
+
* \deprecated This function is deprecated as of CUDA 3.0.
|
| 368 |
+
*
|
| 369 |
+
* Unmaps the buffer object of ID \p bufObj for access by CUDA. When
|
| 370 |
+
* a buffer is unmapped, the base address returned by
|
| 371 |
+
* ::cudaGLMapBufferObject() is invalid and subsequent references to
|
| 372 |
+
* the address result in undefined behavior. The OpenGL context used
|
| 373 |
+
* to create the buffer, or another context from the same share group,
|
| 374 |
+
* must be bound to the current thread when this is called.
|
| 375 |
+
*
|
| 376 |
+
* All streams in the current thread are synchronized with the current
|
| 377 |
+
* GL context.
|
| 378 |
+
*
|
| 379 |
+
* \param bufObj - Buffer object to unmap
|
| 380 |
+
*
|
| 381 |
+
* \return
|
| 382 |
+
* ::cudaSuccess,
|
| 383 |
+
* ::cudaErrorUnmapBufferObjectFailed
|
| 384 |
+
* \notefnerr
|
| 385 |
+
*
|
| 386 |
+
* \sa ::cudaGraphicsUnmapResources
|
| 387 |
+
*/
|
| 388 |
+
extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnmapBufferObject(GLuint bufObj);
|
| 389 |
+
|
| 390 |
+
/**
|
| 391 |
+
* \brief Unregisters a buffer object for access by CUDA
|
| 392 |
+
*
|
| 393 |
+
* \deprecated This function is deprecated as of CUDA 3.0.
|
| 394 |
+
*
|
| 395 |
+
* Unregisters the buffer object of ID \p bufObj for access by CUDA
|
| 396 |
+
* and releases any CUDA resources associated with the buffer. Once a
|
| 397 |
+
* buffer is unregistered, it may no longer be mapped by CUDA. The GL
|
| 398 |
+
* context used to create the buffer, or another context from the
|
| 399 |
+
* same share group, must be bound to the current thread when this is
|
| 400 |
+
* called.
|
| 401 |
+
*
|
| 402 |
+
* \param bufObj - Buffer object to unregister
|
| 403 |
+
*
|
| 404 |
+
* \return
|
| 405 |
+
* ::cudaSuccess
|
| 406 |
+
* \notefnerr
|
| 407 |
+
*
|
| 408 |
+
* \sa ::cudaGraphicsUnregisterResource
|
| 409 |
+
*/
|
| 410 |
+
extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnregisterBufferObject(GLuint bufObj);
|
| 411 |
+
|
| 412 |
+
/**
|
| 413 |
+
* \brief Set usage flags for mapping an OpenGL buffer
|
| 414 |
+
*
|
| 415 |
+
* \deprecated This function is deprecated as of CUDA 3.0.
|
| 416 |
+
*
|
| 417 |
+
* Set flags for mapping the OpenGL buffer \p bufObj
|
| 418 |
+
*
|
| 419 |
+
* Changes to flags will take effect the next time \p bufObj is mapped.
|
| 420 |
+
* The \p flags argument may be any of the following:
|
| 421 |
+
*
|
| 422 |
+
* - ::cudaGLMapFlagsNone: Specifies no hints about how this buffer will
|
| 423 |
+
* be used. It is therefore assumed that this buffer will be read from and
|
| 424 |
+
* written to by CUDA kernels. This is the default value.
|
| 425 |
+
* - ::cudaGLMapFlagsReadOnly: Specifies that CUDA kernels which access this
|
| 426 |
+
* buffer will not write to the buffer.
|
| 427 |
+
* - ::cudaGLMapFlagsWriteDiscard: Specifies that CUDA kernels which access
|
| 428 |
+
* this buffer will not read from the buffer and will write over the
|
| 429 |
+
* entire contents of the buffer, so none of the data previously stored in
|
| 430 |
+
* the buffer will be preserved.
|
| 431 |
+
*
|
| 432 |
+
* If \p bufObj has not been registered for use with CUDA, then
|
| 433 |
+
* ::cudaErrorInvalidResourceHandle is returned. If \p bufObj is presently
|
| 434 |
+
* mapped for access by CUDA, then ::cudaErrorUnknown is returned.
|
| 435 |
+
*
|
| 436 |
+
* \param bufObj - Registered buffer object to set flags for
|
| 437 |
+
* \param flags - Parameters for buffer mapping
|
| 438 |
+
*
|
| 439 |
+
* \return
|
| 440 |
+
* ::cudaSuccess,
|
| 441 |
+
* ::cudaErrorInvalidValue,
|
| 442 |
+
* ::cudaErrorInvalidResourceHandle,
|
| 443 |
+
* ::cudaErrorUnknown
|
| 444 |
+
* \notefnerr
|
| 445 |
+
*
|
| 446 |
+
* \sa ::cudaGraphicsResourceSetMapFlags
|
| 447 |
+
*/
|
| 448 |
+
extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLSetBufferObjectMapFlags(GLuint bufObj, unsigned int flags);
|
| 449 |
+
|
| 450 |
+
/**
|
| 451 |
+
* \brief Maps a buffer object for access by CUDA
|
| 452 |
+
*
|
| 453 |
+
* \deprecated This function is deprecated as of CUDA 3.0.
|
| 454 |
+
*
|
| 455 |
+
* Maps the buffer object of ID \p bufObj into the address space of
|
| 456 |
+
* CUDA and returns in \p *devPtr the base pointer of the resulting
|
| 457 |
+
* mapping. The buffer must have previously been registered by
|
| 458 |
+
* calling ::cudaGLRegisterBufferObject(). While a buffer is mapped
|
| 459 |
+
* by CUDA, any OpenGL operation which references the buffer will
|
| 460 |
+
* result in undefined behavior. The OpenGL context used to create
|
| 461 |
+
* the buffer, or another context from the same share group, must be
|
| 462 |
+
* bound to the current thread when this is called.
|
| 463 |
+
*
|
| 464 |
+
* Stream /p stream is synchronized with the current GL context.
|
| 465 |
+
*
|
| 466 |
+
* \param devPtr - Returned device pointer to CUDA object
|
| 467 |
+
* \param bufObj - Buffer object ID to map
|
| 468 |
+
* \param stream - Stream to synchronize
|
| 469 |
+
*
|
| 470 |
+
* \return
|
| 471 |
+
* ::cudaSuccess,
|
| 472 |
+
* ::cudaErrorMapBufferObjectFailed
|
| 473 |
+
* \notefnerr
|
| 474 |
+
*
|
| 475 |
+
* \sa ::cudaGraphicsMapResources
|
| 476 |
+
*/
|
| 477 |
+
extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLMapBufferObjectAsync(void **devPtr, GLuint bufObj, cudaStream_t stream);
|
| 478 |
+
|
| 479 |
+
/**
|
| 480 |
+
* \brief Unmaps a buffer object for access by CUDA
|
| 481 |
+
*
|
| 482 |
+
* \deprecated This function is deprecated as of CUDA 3.0.
|
| 483 |
+
*
|
| 484 |
+
* Unmaps the buffer object of ID \p bufObj for access by CUDA. When
|
| 485 |
+
* a buffer is unmapped, the base address returned by
|
| 486 |
+
* ::cudaGLMapBufferObject() is invalid and subsequent references to
|
| 487 |
+
* the address result in undefined behavior. The OpenGL context used
|
| 488 |
+
* to create the buffer, or another context from the same share group,
|
| 489 |
+
* must be bound to the current thread when this is called.
|
| 490 |
+
*
|
| 491 |
+
* Stream /p stream is synchronized with the current GL context.
|
| 492 |
+
*
|
| 493 |
+
* \param bufObj - Buffer object to unmap
|
| 494 |
+
* \param stream - Stream to synchronize
|
| 495 |
+
*
|
| 496 |
+
* \return
|
| 497 |
+
* ::cudaSuccess,
|
| 498 |
+
* ::cudaErrorUnmapBufferObjectFailed
|
| 499 |
+
* \notefnerr
|
| 500 |
+
*
|
| 501 |
+
* \sa ::cudaGraphicsUnmapResources
|
| 502 |
+
*/
|
| 503 |
+
extern __CUDA_DEPRECATED __host__ cudaError_t CUDARTAPI cudaGLUnmapBufferObjectAsync(GLuint bufObj, cudaStream_t stream);
|
| 504 |
+
|
| 505 |
+
/** @} */ /* END CUDART_OPENGL_DEPRECATED */
|
| 506 |
+
|
| 507 |
+
#if defined(__cplusplus)
|
| 508 |
+
}
|
| 509 |
+
#endif /* __cplusplus */
|
| 510 |
+
|
| 511 |
+
#undef __CUDA_DEPRECATED
|
| 512 |
+
|
| 513 |
+
#endif /* __CUDA_GL_INTEROP_H__ */
|
| 514 |
+
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h
ADDED
|
@@ -0,0 +1,1958 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
/**
|
| 51 |
+
* CUDA Occupancy Calculator
|
| 52 |
+
*
|
| 53 |
+
* NAME
|
| 54 |
+
*
|
| 55 |
+
* cudaOccMaxActiveBlocksPerMultiprocessor,
|
| 56 |
+
* cudaOccMaxPotentialOccupancyBlockSize,
|
| 57 |
+
* cudaOccMaxPotentialOccupancyBlockSizeVariableSMem
|
| 58 |
+
* cudaOccAvailableDynamicSMemPerBlock
|
| 59 |
+
*
|
| 60 |
+
* DESCRIPTION
|
| 61 |
+
*
|
| 62 |
+
* The CUDA occupancy calculator provides a standalone, programmatical
|
| 63 |
+
* interface to compute the occupancy of a function on a device. It can also
|
| 64 |
+
* provide occupancy-oriented launch configuration suggestions.
|
| 65 |
+
*
|
| 66 |
+
* The function and device are defined by the user through
|
| 67 |
+
* cudaOccFuncAttributes, cudaOccDeviceProp, and cudaOccDeviceState
|
| 68 |
+
* structures. All APIs require all 3 of them.
|
| 69 |
+
*
|
| 70 |
+
* See the structure definition for more details about the device / function
|
| 71 |
+
* descriptors.
|
| 72 |
+
*
|
| 73 |
+
* See each API's prototype for API usage.
|
| 74 |
+
*
|
| 75 |
+
* COMPATIBILITY
|
| 76 |
+
*
|
| 77 |
+
* The occupancy calculator will be updated on each major CUDA toolkit
|
| 78 |
+
* release. It does not provide forward compatibility, i.e. new hardwares
|
| 79 |
+
* released after this implementation's release will not be supported.
|
| 80 |
+
*
|
| 81 |
+
* NOTE
|
| 82 |
+
*
|
| 83 |
+
* If there is access to CUDA runtime, and the sole intent is to calculate
|
| 84 |
+
* occupancy related values on one of the accessible CUDA devices, using CUDA
|
| 85 |
+
* runtime's occupancy calculation APIs is recommended.
|
| 86 |
+
*
|
| 87 |
+
*/
|
| 88 |
+
|
| 89 |
+
#ifndef __cuda_occupancy_h__
|
| 90 |
+
#define __cuda_occupancy_h__
|
| 91 |
+
|
| 92 |
+
#include <stddef.h>
|
| 93 |
+
#include <limits.h>
|
| 94 |
+
#include <string.h>
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
// __OCC_INLINE will be undefined at the end of this header
|
| 98 |
+
//
|
| 99 |
+
#ifdef __CUDACC__
|
| 100 |
+
#define __OCC_INLINE inline __host__ __device__
|
| 101 |
+
#elif defined _MSC_VER
|
| 102 |
+
#define __OCC_INLINE __inline
|
| 103 |
+
#else // GNUCC assumed
|
| 104 |
+
#define __OCC_INLINE inline
|
| 105 |
+
#endif
|
| 106 |
+
|
| 107 |
+
enum cudaOccError_enum {
|
| 108 |
+
CUDA_OCC_SUCCESS = 0, // no error encountered
|
| 109 |
+
CUDA_OCC_ERROR_INVALID_INPUT = 1, // input parameter is invalid
|
| 110 |
+
CUDA_OCC_ERROR_UNKNOWN_DEVICE = 2, // requested device is not supported in
|
| 111 |
+
// current implementation or device is
|
| 112 |
+
// invalid
|
| 113 |
+
};
|
| 114 |
+
typedef enum cudaOccError_enum cudaOccError;
|
| 115 |
+
|
| 116 |
+
typedef struct cudaOccResult cudaOccResult;
|
| 117 |
+
typedef struct cudaOccDeviceProp cudaOccDeviceProp;
|
| 118 |
+
typedef struct cudaOccFuncAttributes cudaOccFuncAttributes;
|
| 119 |
+
typedef struct cudaOccDeviceState cudaOccDeviceState;
|
| 120 |
+
|
| 121 |
+
/**
|
| 122 |
+
* The CUDA occupancy calculator computes the occupancy of the function
|
| 123 |
+
* described by attributes with the given block size (blockSize), static device
|
| 124 |
+
* properties (properties), dynamic device states (states) and per-block dynamic
|
| 125 |
+
* shared memory allocation (dynamicSMemSize) in bytes, and output it through
|
| 126 |
+
* result along with other useful information. The occupancy is computed in
|
| 127 |
+
* terms of the maximum number of active blocks per multiprocessor. The user can
|
| 128 |
+
* then convert it to other metrics, such as number of active warps.
|
| 129 |
+
*
|
| 130 |
+
* RETURN VALUE
|
| 131 |
+
*
|
| 132 |
+
* The occupancy and related information is returned through result.
|
| 133 |
+
*
|
| 134 |
+
* If result->activeBlocksPerMultiprocessor is 0, then the given parameter
|
| 135 |
+
* combination cannot run on the device.
|
| 136 |
+
*
|
| 137 |
+
* ERRORS
|
| 138 |
+
*
|
| 139 |
+
* CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
|
| 140 |
+
* CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
|
| 141 |
+
* current implementation or device is invalid
|
| 142 |
+
*/
|
| 143 |
+
static __OCC_INLINE
|
| 144 |
+
cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor(
|
| 145 |
+
cudaOccResult *result, // out
|
| 146 |
+
const cudaOccDeviceProp *properties, // in
|
| 147 |
+
const cudaOccFuncAttributes *attributes, // in
|
| 148 |
+
const cudaOccDeviceState *state, // in
|
| 149 |
+
int blockSize, // in
|
| 150 |
+
size_t dynamicSmemSize); // in
|
| 151 |
+
|
| 152 |
+
/**
|
| 153 |
+
* The CUDA launch configurator C API suggests a grid / block size pair (in
|
| 154 |
+
* minGridSize and blockSize) that achieves the best potential occupancy
|
| 155 |
+
* (i.e. maximum number of active warps with the smallest number of blocks) for
|
| 156 |
+
* the given function described by attributes, on a device described by
|
| 157 |
+
* properties with settings in state.
|
| 158 |
+
*
|
| 159 |
+
* If per-block dynamic shared memory allocation is not needed, the user should
|
| 160 |
+
* leave both blockSizeToDynamicSMemSize and dynamicSMemSize as 0.
|
| 161 |
+
*
|
| 162 |
+
* If per-block dynamic shared memory allocation is needed, then if the dynamic
|
| 163 |
+
* shared memory size is constant regardless of block size, the size should be
|
| 164 |
+
* passed through dynamicSMemSize, and blockSizeToDynamicSMemSize should be
|
| 165 |
+
* NULL.
|
| 166 |
+
*
|
| 167 |
+
* Otherwise, if the per-block dynamic shared memory size varies with different
|
| 168 |
+
* block sizes, the user needs to provide a pointer to an unary function through
|
| 169 |
+
* blockSizeToDynamicSMemSize that computes the dynamic shared memory needed by
|
| 170 |
+
* a block of the function for any given block size. dynamicSMemSize is
|
| 171 |
+
* ignored. An example signature is:
|
| 172 |
+
*
|
| 173 |
+
* // Take block size, returns dynamic shared memory needed
|
| 174 |
+
* size_t blockToSmem(int blockSize);
|
| 175 |
+
*
|
| 176 |
+
* RETURN VALUE
|
| 177 |
+
*
|
| 178 |
+
* The suggested block size and the minimum number of blocks needed to achieve
|
| 179 |
+
* the maximum occupancy are returned through blockSize and minGridSize.
|
| 180 |
+
*
|
| 181 |
+
* If *blockSize is 0, then the given combination cannot run on the device.
|
| 182 |
+
*
|
| 183 |
+
* ERRORS
|
| 184 |
+
*
|
| 185 |
+
* CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
|
| 186 |
+
* CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
|
| 187 |
+
* current implementation or device is invalid
|
| 188 |
+
*
|
| 189 |
+
*/
|
| 190 |
+
static __OCC_INLINE
|
| 191 |
+
cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
|
| 192 |
+
int *minGridSize, // out
|
| 193 |
+
int *blockSize, // out
|
| 194 |
+
const cudaOccDeviceProp *properties, // in
|
| 195 |
+
const cudaOccFuncAttributes *attributes, // in
|
| 196 |
+
const cudaOccDeviceState *state, // in
|
| 197 |
+
size_t (*blockSizeToDynamicSMemSize)(int), // in
|
| 198 |
+
size_t dynamicSMemSize); // in
|
| 199 |
+
|
| 200 |
+
/**
|
| 201 |
+
* The CUDA launch configurator C++ API suggests a grid / block size pair (in
|
| 202 |
+
* minGridSize and blockSize) that achieves the best potential occupancy
|
| 203 |
+
* (i.e. the maximum number of active warps with the smallest number of blocks)
|
| 204 |
+
* for the given function described by attributes, on a device described by
|
| 205 |
+
* properties with settings in state.
|
| 206 |
+
*
|
| 207 |
+
* If per-block dynamic shared memory allocation is 0 or constant regardless of
|
| 208 |
+
* block size, the user can use cudaOccMaxPotentialOccupancyBlockSize to
|
| 209 |
+
* configure the launch. A constant dynamic shared memory allocation size in
|
| 210 |
+
* bytes can be passed through dynamicSMemSize.
|
| 211 |
+
*
|
| 212 |
+
* Otherwise, if the per-block dynamic shared memory size varies with different
|
| 213 |
+
* block sizes, the user needs to use
|
| 214 |
+
* cudaOccMaxPotentialOccupancyBlockSizeVariableSmem instead, and provide a
|
| 215 |
+
* functor / pointer to an unary function (blockSizeToDynamicSMemSize) that
|
| 216 |
+
* computes the dynamic shared memory needed by func for any given block
|
| 217 |
+
* size. An example signature is:
|
| 218 |
+
*
|
| 219 |
+
* // Take block size, returns per-block dynamic shared memory needed
|
| 220 |
+
* size_t blockToSmem(int blockSize);
|
| 221 |
+
*
|
| 222 |
+
* RETURN VALUE
|
| 223 |
+
*
|
| 224 |
+
* The suggested block size and the minimum number of blocks needed to achieve
|
| 225 |
+
* the maximum occupancy are returned through blockSize and minGridSize.
|
| 226 |
+
*
|
| 227 |
+
* If *blockSize is 0, then the given combination cannot run on the device.
|
| 228 |
+
*
|
| 229 |
+
* ERRORS
|
| 230 |
+
*
|
| 231 |
+
* CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
|
| 232 |
+
* CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
|
| 233 |
+
* current implementation or device is invalid
|
| 234 |
+
*
|
| 235 |
+
*/
|
| 236 |
+
|
| 237 |
+
#if defined(__cplusplus)
|
| 238 |
+
namespace {
|
| 239 |
+
|
| 240 |
+
__OCC_INLINE
|
| 241 |
+
cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
|
| 242 |
+
int *minGridSize, // out
|
| 243 |
+
int *blockSize, // out
|
| 244 |
+
const cudaOccDeviceProp *properties, // in
|
| 245 |
+
const cudaOccFuncAttributes *attributes, // in
|
| 246 |
+
const cudaOccDeviceState *state, // in
|
| 247 |
+
size_t dynamicSMemSize = 0); // in
|
| 248 |
+
|
| 249 |
+
template <typename UnaryFunction>
|
| 250 |
+
__OCC_INLINE
|
| 251 |
+
cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem(
|
| 252 |
+
int *minGridSize, // out
|
| 253 |
+
int *blockSize, // out
|
| 254 |
+
const cudaOccDeviceProp *properties, // in
|
| 255 |
+
const cudaOccFuncAttributes *attributes, // in
|
| 256 |
+
const cudaOccDeviceState *state, // in
|
| 257 |
+
UnaryFunction blockSizeToDynamicSMemSize); // in
|
| 258 |
+
|
| 259 |
+
} // namespace anonymous
|
| 260 |
+
#endif // defined(__cplusplus)
|
| 261 |
+
|
| 262 |
+
/**
|
| 263 |
+
*
|
| 264 |
+
* The CUDA dynamic shared memory calculator computes the maximum size of
|
| 265 |
+
* per-block dynamic shared memory if we want to place numBlocks blocks
|
| 266 |
+
* on an SM.
|
| 267 |
+
*
|
| 268 |
+
* RETURN VALUE
|
| 269 |
+
*
|
| 270 |
+
* Returns in *dynamicSmemSize the maximum size of dynamic shared memory to allow
|
| 271 |
+
* numBlocks blocks per SM.
|
| 272 |
+
*
|
| 273 |
+
* ERRORS
|
| 274 |
+
*
|
| 275 |
+
* CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
|
| 276 |
+
* CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
|
| 277 |
+
* current implementation or device is invalid
|
| 278 |
+
*
|
| 279 |
+
*/
|
| 280 |
+
static __OCC_INLINE
|
| 281 |
+
cudaOccError cudaOccAvailableDynamicSMemPerBlock(
|
| 282 |
+
size_t *dynamicSmemSize,
|
| 283 |
+
const cudaOccDeviceProp *properties,
|
| 284 |
+
const cudaOccFuncAttributes *attributes,
|
| 285 |
+
const cudaOccDeviceState *state,
|
| 286 |
+
int numBlocks,
|
| 287 |
+
int blockSize);
|
| 288 |
+
|
| 289 |
+
/**
|
| 290 |
+
* Data structures
|
| 291 |
+
*
|
| 292 |
+
* These structures are subject to change for future architecture and CUDA
|
| 293 |
+
* releases. C users should initialize the structure as {0}.
|
| 294 |
+
*
|
| 295 |
+
*/
|
| 296 |
+
|
| 297 |
+
/**
|
| 298 |
+
* Device descriptor
|
| 299 |
+
*
|
| 300 |
+
* This structure describes a device.
|
| 301 |
+
*/
|
| 302 |
+
struct cudaOccDeviceProp {
|
| 303 |
+
int computeMajor; // Compute capability major version
|
| 304 |
+
int computeMinor; // Compute capability minor
|
| 305 |
+
// version. None supported minor version
|
| 306 |
+
// may cause error
|
| 307 |
+
int maxThreadsPerBlock; // Maximum number of threads per block
|
| 308 |
+
int maxThreadsPerMultiprocessor; // Maximum number of threads per SM
|
| 309 |
+
// i.e. (Max. number of warps) x (warp
|
| 310 |
+
// size)
|
| 311 |
+
int regsPerBlock; // Maximum number of registers per block
|
| 312 |
+
int regsPerMultiprocessor; // Maximum number of registers per SM
|
| 313 |
+
int warpSize; // Warp size
|
| 314 |
+
size_t sharedMemPerBlock; // Maximum shared memory size per block
|
| 315 |
+
size_t sharedMemPerMultiprocessor; // Maximum shared memory size per SM
|
| 316 |
+
int numSms; // Number of SMs available
|
| 317 |
+
size_t sharedMemPerBlockOptin; // Maximum optin shared memory size per block
|
| 318 |
+
size_t reservedSharedMemPerBlock; // Shared memory per block reserved by driver
|
| 319 |
+
|
| 320 |
+
#ifdef __cplusplus
|
| 321 |
+
// This structure can be converted from a cudaDeviceProp structure for users
|
| 322 |
+
// that use this header in their CUDA applications.
|
| 323 |
+
//
|
| 324 |
+
// If the application have access to the CUDA Runtime API, the application
|
| 325 |
+
// can obtain the device properties of a CUDA device through
|
| 326 |
+
// cudaGetDeviceProperties, and initialize a cudaOccDeviceProp with the
|
| 327 |
+
// cudaDeviceProp structure.
|
| 328 |
+
//
|
| 329 |
+
// Example:
|
| 330 |
+
/*
|
| 331 |
+
{
|
| 332 |
+
cudaDeviceProp prop;
|
| 333 |
+
|
| 334 |
+
cudaGetDeviceProperties(&prop, ...);
|
| 335 |
+
|
| 336 |
+
cudaOccDeviceProp occProp = prop;
|
| 337 |
+
|
| 338 |
+
...
|
| 339 |
+
|
| 340 |
+
cudaOccMaxPotentialOccupancyBlockSize(..., &occProp, ...);
|
| 341 |
+
}
|
| 342 |
+
*/
|
| 343 |
+
//
|
| 344 |
+
template<typename DeviceProp>
|
| 345 |
+
__OCC_INLINE
|
| 346 |
+
cudaOccDeviceProp(const DeviceProp &props)
|
| 347 |
+
: computeMajor (props.major),
|
| 348 |
+
computeMinor (props.minor),
|
| 349 |
+
maxThreadsPerBlock (props.maxThreadsPerBlock),
|
| 350 |
+
maxThreadsPerMultiprocessor (props.maxThreadsPerMultiProcessor),
|
| 351 |
+
regsPerBlock (props.regsPerBlock),
|
| 352 |
+
regsPerMultiprocessor (props.regsPerMultiprocessor),
|
| 353 |
+
warpSize (props.warpSize),
|
| 354 |
+
sharedMemPerBlock (props.sharedMemPerBlock),
|
| 355 |
+
sharedMemPerMultiprocessor (props.sharedMemPerMultiprocessor),
|
| 356 |
+
numSms (props.multiProcessorCount),
|
| 357 |
+
sharedMemPerBlockOptin (props.sharedMemPerBlockOptin),
|
| 358 |
+
reservedSharedMemPerBlock (props.reservedSharedMemPerBlock)
|
| 359 |
+
{}
|
| 360 |
+
|
| 361 |
+
__OCC_INLINE
|
| 362 |
+
cudaOccDeviceProp()
|
| 363 |
+
: computeMajor (0),
|
| 364 |
+
computeMinor (0),
|
| 365 |
+
maxThreadsPerBlock (0),
|
| 366 |
+
maxThreadsPerMultiprocessor (0),
|
| 367 |
+
regsPerBlock (0),
|
| 368 |
+
regsPerMultiprocessor (0),
|
| 369 |
+
warpSize (0),
|
| 370 |
+
sharedMemPerBlock (0),
|
| 371 |
+
sharedMemPerMultiprocessor (0),
|
| 372 |
+
numSms (0),
|
| 373 |
+
sharedMemPerBlockOptin (0),
|
| 374 |
+
reservedSharedMemPerBlock (0)
|
| 375 |
+
{}
|
| 376 |
+
#endif // __cplusplus
|
| 377 |
+
};
|
| 378 |
+
|
| 379 |
+
/**
|
| 380 |
+
* Partitioned global caching option
|
| 381 |
+
*/
|
| 382 |
+
typedef enum cudaOccPartitionedGCConfig_enum {
|
| 383 |
+
PARTITIONED_GC_OFF, // Disable partitioned global caching
|
| 384 |
+
PARTITIONED_GC_ON, // Prefer partitioned global caching
|
| 385 |
+
PARTITIONED_GC_ON_STRICT // Force partitioned global caching
|
| 386 |
+
} cudaOccPartitionedGCConfig;
|
| 387 |
+
|
| 388 |
+
/**
|
| 389 |
+
* Per function opt in maximum dynamic shared memory limit
|
| 390 |
+
*/
|
| 391 |
+
typedef enum cudaOccFuncShmemConfig_enum {
|
| 392 |
+
FUNC_SHMEM_LIMIT_DEFAULT, // Default shmem limit
|
| 393 |
+
FUNC_SHMEM_LIMIT_OPTIN, // Use the optin shmem limit
|
| 394 |
+
} cudaOccFuncShmemConfig;
|
| 395 |
+
|
| 396 |
+
/**
|
| 397 |
+
* Function descriptor
|
| 398 |
+
*
|
| 399 |
+
* This structure describes a CUDA function.
|
| 400 |
+
*/
|
| 401 |
+
struct cudaOccFuncAttributes {
|
| 402 |
+
int maxThreadsPerBlock; // Maximum block size the function can work with. If
|
| 403 |
+
// unlimited, use INT_MAX or any value greater than
|
| 404 |
+
// or equal to maxThreadsPerBlock of the device
|
| 405 |
+
int numRegs; // Number of registers used. When the function is
|
| 406 |
+
// launched on device, the register count may change
|
| 407 |
+
// due to internal tools requirements.
|
| 408 |
+
size_t sharedSizeBytes; // Number of static shared memory used
|
| 409 |
+
|
| 410 |
+
cudaOccPartitionedGCConfig partitionedGCConfig;
|
| 411 |
+
// Partitioned global caching is required to enable
|
| 412 |
+
// caching on certain chips, such as sm_52
|
| 413 |
+
// devices. Partitioned global caching can be
|
| 414 |
+
// automatically disabled if the occupancy
|
| 415 |
+
// requirement of the launch cannot support caching.
|
| 416 |
+
//
|
| 417 |
+
// To override this behavior with caching on and
|
| 418 |
+
// calculate occupancy strictly according to the
|
| 419 |
+
// preference, set partitionedGCConfig to
|
| 420 |
+
// PARTITIONED_GC_ON_STRICT. This is especially
|
| 421 |
+
// useful for experimenting and finding launch
|
| 422 |
+
// configurations (MaxPotentialOccupancyBlockSize)
|
| 423 |
+
// that allow global caching to take effect.
|
| 424 |
+
//
|
| 425 |
+
// This flag only affects the occupancy calculation.
|
| 426 |
+
|
| 427 |
+
cudaOccFuncShmemConfig shmemLimitConfig;
|
| 428 |
+
// Certain chips like sm_70 allow a user to opt into
|
| 429 |
+
// a higher per block limit of dynamic shared memory
|
| 430 |
+
// This optin is performed on a per function basis
|
| 431 |
+
// using the cuFuncSetAttribute function
|
| 432 |
+
|
| 433 |
+
size_t maxDynamicSharedSizeBytes;
|
| 434 |
+
// User set limit on maximum dynamic shared memory
|
| 435 |
+
// usable by the kernel
|
| 436 |
+
// This limit is set using the cuFuncSetAttribute
|
| 437 |
+
// function.
|
| 438 |
+
|
| 439 |
+
int numBlockBarriers; // Number of block barriers used (default to 1)
|
| 440 |
+
#ifdef __cplusplus
|
| 441 |
+
// This structure can be converted from a cudaFuncAttributes structure for
|
| 442 |
+
// users that use this header in their CUDA applications.
|
| 443 |
+
//
|
| 444 |
+
// If the application have access to the CUDA Runtime API, the application
|
| 445 |
+
// can obtain the function attributes of a CUDA kernel function through
|
| 446 |
+
// cudaFuncGetAttributes, and initialize a cudaOccFuncAttributes with the
|
| 447 |
+
// cudaFuncAttributes structure.
|
| 448 |
+
//
|
| 449 |
+
// Example:
|
| 450 |
+
/*
|
| 451 |
+
__global__ void foo() {...}
|
| 452 |
+
|
| 453 |
+
...
|
| 454 |
+
|
| 455 |
+
{
|
| 456 |
+
cudaFuncAttributes attr;
|
| 457 |
+
|
| 458 |
+
cudaFuncGetAttributes(&attr, foo);
|
| 459 |
+
|
| 460 |
+
cudaOccFuncAttributes occAttr = attr;
|
| 461 |
+
|
| 462 |
+
...
|
| 463 |
+
|
| 464 |
+
cudaOccMaxPotentialOccupancyBlockSize(..., &occAttr, ...);
|
| 465 |
+
}
|
| 466 |
+
*/
|
| 467 |
+
//
|
| 468 |
+
template<typename FuncAttributes>
|
| 469 |
+
__OCC_INLINE
|
| 470 |
+
cudaOccFuncAttributes(const FuncAttributes &attr)
|
| 471 |
+
: maxThreadsPerBlock (attr.maxThreadsPerBlock),
|
| 472 |
+
numRegs (attr.numRegs),
|
| 473 |
+
sharedSizeBytes (attr.sharedSizeBytes),
|
| 474 |
+
partitionedGCConfig (PARTITIONED_GC_OFF),
|
| 475 |
+
shmemLimitConfig (FUNC_SHMEM_LIMIT_OPTIN),
|
| 476 |
+
maxDynamicSharedSizeBytes (attr.maxDynamicSharedSizeBytes),
|
| 477 |
+
numBlockBarriers (1)
|
| 478 |
+
{}
|
| 479 |
+
|
| 480 |
+
__OCC_INLINE
|
| 481 |
+
cudaOccFuncAttributes()
|
| 482 |
+
: maxThreadsPerBlock (0),
|
| 483 |
+
numRegs (0),
|
| 484 |
+
sharedSizeBytes (0),
|
| 485 |
+
partitionedGCConfig (PARTITIONED_GC_OFF),
|
| 486 |
+
shmemLimitConfig (FUNC_SHMEM_LIMIT_DEFAULT),
|
| 487 |
+
maxDynamicSharedSizeBytes (0),
|
| 488 |
+
numBlockBarriers (0)
|
| 489 |
+
{}
|
| 490 |
+
#endif
|
| 491 |
+
};
|
| 492 |
+
|
| 493 |
+
typedef enum cudaOccCacheConfig_enum {
|
| 494 |
+
CACHE_PREFER_NONE = 0x00, // no preference for shared memory or L1 (default)
|
| 495 |
+
CACHE_PREFER_SHARED = 0x01, // prefer larger shared memory and smaller L1 cache
|
| 496 |
+
CACHE_PREFER_L1 = 0x02, // prefer larger L1 cache and smaller shared memory
|
| 497 |
+
CACHE_PREFER_EQUAL = 0x03 // prefer equal sized L1 cache and shared memory
|
| 498 |
+
} cudaOccCacheConfig;
|
| 499 |
+
|
| 500 |
+
typedef enum cudaOccCarveoutConfig_enum {
|
| 501 |
+
SHAREDMEM_CARVEOUT_DEFAULT = -1, // no preference for shared memory or L1 (default)
|
| 502 |
+
SHAREDMEM_CARVEOUT_MAX_SHARED = 100, // prefer maximum available shared memory, minimum L1 cache
|
| 503 |
+
SHAREDMEM_CARVEOUT_MAX_L1 = 0, // prefer maximum available L1 cache, minimum shared memory
|
| 504 |
+
SHAREDMEM_CARVEOUT_HALF = 50 // prefer half of maximum available shared memory, with the rest as L1 cache
|
| 505 |
+
} cudaOccCarveoutConfig;
|
| 506 |
+
|
| 507 |
+
/**
|
| 508 |
+
* Device state descriptor
|
| 509 |
+
*
|
| 510 |
+
* This structure describes device settings that affect occupancy calculation.
|
| 511 |
+
*/
|
| 512 |
+
struct cudaOccDeviceState
|
| 513 |
+
{
|
| 514 |
+
// Cache / shared memory split preference. Deprecated on Volta
|
| 515 |
+
cudaOccCacheConfig cacheConfig;
|
| 516 |
+
// Shared memory / L1 split preference. Supported on only Volta
|
| 517 |
+
int carveoutConfig;
|
| 518 |
+
|
| 519 |
+
#ifdef __cplusplus
|
| 520 |
+
__OCC_INLINE
|
| 521 |
+
cudaOccDeviceState()
|
| 522 |
+
: cacheConfig (CACHE_PREFER_NONE),
|
| 523 |
+
carveoutConfig (SHAREDMEM_CARVEOUT_DEFAULT)
|
| 524 |
+
{}
|
| 525 |
+
#endif
|
| 526 |
+
};
|
| 527 |
+
|
| 528 |
+
typedef enum cudaOccLimitingFactor_enum {
|
| 529 |
+
// Occupancy limited due to:
|
| 530 |
+
OCC_LIMIT_WARPS = 0x01, // - warps available
|
| 531 |
+
OCC_LIMIT_REGISTERS = 0x02, // - registers available
|
| 532 |
+
OCC_LIMIT_SHARED_MEMORY = 0x04, // - shared memory available
|
| 533 |
+
OCC_LIMIT_BLOCKS = 0x08, // - blocks available
|
| 534 |
+
OCC_LIMIT_BARRIERS = 0x10 // - barrier available
|
| 535 |
+
} cudaOccLimitingFactor;
|
| 536 |
+
|
| 537 |
+
/**
|
| 538 |
+
* Occupancy output
|
| 539 |
+
*
|
| 540 |
+
* This structure contains occupancy calculator's output.
|
| 541 |
+
*/
|
| 542 |
+
struct cudaOccResult {
|
| 543 |
+
int activeBlocksPerMultiprocessor; // Occupancy
|
| 544 |
+
unsigned int limitingFactors; // Factors that limited occupancy. A bit
|
| 545 |
+
// field that counts the limiting
|
| 546 |
+
// factors, see cudaOccLimitingFactor
|
| 547 |
+
int blockLimitRegs; // Occupancy due to register
|
| 548 |
+
// usage, INT_MAX if the kernel does not
|
| 549 |
+
// use any register.
|
| 550 |
+
int blockLimitSharedMem; // Occupancy due to shared memory
|
| 551 |
+
// usage, INT_MAX if the kernel does not
|
| 552 |
+
// use shared memory.
|
| 553 |
+
int blockLimitWarps; // Occupancy due to block size limit
|
| 554 |
+
int blockLimitBlocks; // Occupancy due to maximum number of blocks
|
| 555 |
+
// managable per SM
|
| 556 |
+
int blockLimitBarriers; // Occupancy due to block barrier usage
|
| 557 |
+
int allocatedRegistersPerBlock; // Actual number of registers allocated per
|
| 558 |
+
// block
|
| 559 |
+
size_t allocatedSharedMemPerBlock; // Actual size of shared memory allocated
|
| 560 |
+
// per block
|
| 561 |
+
cudaOccPartitionedGCConfig partitionedGCConfig;
|
| 562 |
+
// Report if partitioned global caching
|
| 563 |
+
// is actually enabled.
|
| 564 |
+
};
|
| 565 |
+
|
| 566 |
+
/**
|
| 567 |
+
* Partitioned global caching support
|
| 568 |
+
*
|
| 569 |
+
* See cudaOccPartitionedGlobalCachingModeSupport
|
| 570 |
+
*/
|
| 571 |
+
typedef enum cudaOccPartitionedGCSupport_enum {
|
| 572 |
+
PARTITIONED_GC_NOT_SUPPORTED, // Partitioned global caching is not supported
|
| 573 |
+
PARTITIONED_GC_SUPPORTED, // Partitioned global caching is supported
|
| 574 |
+
} cudaOccPartitionedGCSupport;
|
| 575 |
+
|
| 576 |
+
/**
|
| 577 |
+
* Implementation
|
| 578 |
+
*/
|
| 579 |
+
|
| 580 |
+
/**
|
| 581 |
+
* Max compute capability supported
|
| 582 |
+
*/
|
| 583 |
+
#define __CUDA_OCC_MAJOR__ 9
|
| 584 |
+
#define __CUDA_OCC_MINOR__ 0
|
| 585 |
+
|
| 586 |
+
//////////////////////////////////////////
|
| 587 |
+
// Mathematical Helper Functions //
|
| 588 |
+
//////////////////////////////////////////
|
| 589 |
+
|
| 590 |
+
static __OCC_INLINE int __occMin(int lhs, int rhs)
|
| 591 |
+
{
|
| 592 |
+
return rhs < lhs ? rhs : lhs;
|
| 593 |
+
}
|
| 594 |
+
|
| 595 |
+
static __OCC_INLINE int __occDivideRoundUp(int x, int y)
|
| 596 |
+
{
|
| 597 |
+
return (x + (y - 1)) / y;
|
| 598 |
+
}
|
| 599 |
+
|
| 600 |
+
static __OCC_INLINE int __occRoundUp(int x, int y)
|
| 601 |
+
{
|
| 602 |
+
return y * __occDivideRoundUp(x, y);
|
| 603 |
+
}
|
| 604 |
+
|
| 605 |
+
//////////////////////////////////////////
|
| 606 |
+
// Architectural Properties //
|
| 607 |
+
//////////////////////////////////////////
|
| 608 |
+
|
| 609 |
+
/**
|
| 610 |
+
* Granularity of shared memory allocation
|
| 611 |
+
*/
|
| 612 |
+
static __OCC_INLINE cudaOccError cudaOccSMemAllocationGranularity(int *limit, const cudaOccDeviceProp *properties)
|
| 613 |
+
{
|
| 614 |
+
int value;
|
| 615 |
+
|
| 616 |
+
switch(properties->computeMajor) {
|
| 617 |
+
case 3:
|
| 618 |
+
case 5:
|
| 619 |
+
case 6:
|
| 620 |
+
case 7:
|
| 621 |
+
value = 256;
|
| 622 |
+
break;
|
| 623 |
+
case 8:
|
| 624 |
+
case 9:
|
| 625 |
+
value = 128;
|
| 626 |
+
break;
|
| 627 |
+
default:
|
| 628 |
+
return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
|
| 629 |
+
}
|
| 630 |
+
|
| 631 |
+
*limit = value;
|
| 632 |
+
|
| 633 |
+
return CUDA_OCC_SUCCESS;
|
| 634 |
+
}
|
| 635 |
+
|
| 636 |
+
/**
|
| 637 |
+
* Maximum number of registers per thread
|
| 638 |
+
*/
|
| 639 |
+
static __OCC_INLINE cudaOccError cudaOccRegAllocationMaxPerThread(int *limit, const cudaOccDeviceProp *properties)
|
| 640 |
+
{
|
| 641 |
+
int value;
|
| 642 |
+
|
| 643 |
+
switch(properties->computeMajor) {
|
| 644 |
+
case 3:
|
| 645 |
+
case 5:
|
| 646 |
+
case 6:
|
| 647 |
+
value = 255;
|
| 648 |
+
break;
|
| 649 |
+
case 7:
|
| 650 |
+
case 8:
|
| 651 |
+
case 9:
|
| 652 |
+
value = 256;
|
| 653 |
+
break;
|
| 654 |
+
default:
|
| 655 |
+
return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
|
| 656 |
+
}
|
| 657 |
+
|
| 658 |
+
*limit = value;
|
| 659 |
+
|
| 660 |
+
return CUDA_OCC_SUCCESS;
|
| 661 |
+
}
|
| 662 |
+
|
| 663 |
+
/**
|
| 664 |
+
* Granularity of register allocation
|
| 665 |
+
*/
|
| 666 |
+
static __OCC_INLINE cudaOccError cudaOccRegAllocationGranularity(int *limit, const cudaOccDeviceProp *properties)
|
| 667 |
+
{
|
| 668 |
+
int value;
|
| 669 |
+
|
| 670 |
+
switch(properties->computeMajor) {
|
| 671 |
+
case 3:
|
| 672 |
+
case 5:
|
| 673 |
+
case 6:
|
| 674 |
+
case 7:
|
| 675 |
+
case 8:
|
| 676 |
+
case 9:
|
| 677 |
+
value = 256;
|
| 678 |
+
break;
|
| 679 |
+
default:
|
| 680 |
+
return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
|
| 681 |
+
}
|
| 682 |
+
|
| 683 |
+
*limit = value;
|
| 684 |
+
|
| 685 |
+
return CUDA_OCC_SUCCESS;
|
| 686 |
+
}
|
| 687 |
+
|
| 688 |
+
/**
|
| 689 |
+
* Number of sub-partitions
|
| 690 |
+
*/
|
| 691 |
+
static __OCC_INLINE cudaOccError cudaOccSubPartitionsPerMultiprocessor(int *limit, const cudaOccDeviceProp *properties)
|
| 692 |
+
{
|
| 693 |
+
int value;
|
| 694 |
+
|
| 695 |
+
switch(properties->computeMajor) {
|
| 696 |
+
case 3:
|
| 697 |
+
case 5:
|
| 698 |
+
case 7:
|
| 699 |
+
case 8:
|
| 700 |
+
case 9:
|
| 701 |
+
value = 4;
|
| 702 |
+
break;
|
| 703 |
+
case 6:
|
| 704 |
+
value = properties->computeMinor ? 4 : 2;
|
| 705 |
+
break;
|
| 706 |
+
default:
|
| 707 |
+
return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
|
| 708 |
+
}
|
| 709 |
+
|
| 710 |
+
*limit = value;
|
| 711 |
+
|
| 712 |
+
return CUDA_OCC_SUCCESS;
|
| 713 |
+
}
|
| 714 |
+
|
| 715 |
+
|
| 716 |
+
/**
|
| 717 |
+
* Maximum number of blocks that can run simultaneously on a multiprocessor
|
| 718 |
+
*/
|
| 719 |
+
static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerMultiprocessor(int* limit, const cudaOccDeviceProp *properties)
|
| 720 |
+
{
|
| 721 |
+
int value;
|
| 722 |
+
|
| 723 |
+
switch(properties->computeMajor) {
|
| 724 |
+
case 3:
|
| 725 |
+
value = 16;
|
| 726 |
+
break;
|
| 727 |
+
case 5:
|
| 728 |
+
case 6:
|
| 729 |
+
value = 32;
|
| 730 |
+
break;
|
| 731 |
+
case 7: {
|
| 732 |
+
int isTuring = properties->computeMinor == 5;
|
| 733 |
+
value = (isTuring) ? 16 : 32;
|
| 734 |
+
break;
|
| 735 |
+
}
|
| 736 |
+
case 8:
|
| 737 |
+
if (properties->computeMinor == 0) {
|
| 738 |
+
value = 32;
|
| 739 |
+
}
|
| 740 |
+
else if (properties->computeMinor == 9) {
|
| 741 |
+
value = 24;
|
| 742 |
+
}
|
| 743 |
+
else {
|
| 744 |
+
value = 16;
|
| 745 |
+
}
|
| 746 |
+
break;
|
| 747 |
+
case 9:
|
| 748 |
+
value = 32;
|
| 749 |
+
break;
|
| 750 |
+
default:
|
| 751 |
+
return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
|
| 752 |
+
}
|
| 753 |
+
|
| 754 |
+
*limit = value;
|
| 755 |
+
|
| 756 |
+
return CUDA_OCC_SUCCESS;
|
| 757 |
+
}
|
| 758 |
+
|
| 759 |
+
/**
|
| 760 |
+
* Align up shared memory based on compute major configurations
|
| 761 |
+
*/
|
| 762 |
+
static __OCC_INLINE cudaOccError cudaOccAlignUpShmemSizeVoltaPlus(size_t *shMemSize, const cudaOccDeviceProp *properties)
|
| 763 |
+
{
|
| 764 |
+
// Volta and Turing have shared L1 cache / shared memory, and support cache
|
| 765 |
+
// configuration to trade one for the other. These values are needed to
|
| 766 |
+
// map carveout config ratio to the next available architecture size
|
| 767 |
+
size_t size = *shMemSize;
|
| 768 |
+
|
| 769 |
+
switch (properties->computeMajor) {
|
| 770 |
+
case 7: {
|
| 771 |
+
// Turing supports 32KB and 64KB shared mem.
|
| 772 |
+
int isTuring = properties->computeMinor == 5;
|
| 773 |
+
if (isTuring) {
|
| 774 |
+
if (size <= 32 * 1024) {
|
| 775 |
+
*shMemSize = 32 * 1024;
|
| 776 |
+
}
|
| 777 |
+
else if (size <= 64 * 1024) {
|
| 778 |
+
*shMemSize = 64 * 1024;
|
| 779 |
+
}
|
| 780 |
+
else {
|
| 781 |
+
return CUDA_OCC_ERROR_INVALID_INPUT;
|
| 782 |
+
}
|
| 783 |
+
}
|
| 784 |
+
// Volta supports 0KB, 8KB, 16KB, 32KB, 64KB, and 96KB shared mem.
|
| 785 |
+
else {
|
| 786 |
+
if (size == 0) {
|
| 787 |
+
*shMemSize = 0;
|
| 788 |
+
}
|
| 789 |
+
else if (size <= 8 * 1024) {
|
| 790 |
+
*shMemSize = 8 * 1024;
|
| 791 |
+
}
|
| 792 |
+
else if (size <= 16 * 1024) {
|
| 793 |
+
*shMemSize = 16 * 1024;
|
| 794 |
+
}
|
| 795 |
+
else if (size <= 32 * 1024) {
|
| 796 |
+
*shMemSize = 32 * 1024;
|
| 797 |
+
}
|
| 798 |
+
else if (size <= 64 * 1024) {
|
| 799 |
+
*shMemSize = 64 * 1024;
|
| 800 |
+
}
|
| 801 |
+
else if (size <= 96 * 1024) {
|
| 802 |
+
*shMemSize = 96 * 1024;
|
| 803 |
+
}
|
| 804 |
+
else {
|
| 805 |
+
return CUDA_OCC_ERROR_INVALID_INPUT;
|
| 806 |
+
}
|
| 807 |
+
}
|
| 808 |
+
break;
|
| 809 |
+
}
|
| 810 |
+
case 8:
|
| 811 |
+
if (properties->computeMinor == 0 || properties->computeMinor == 7) {
|
| 812 |
+
if (size == 0) {
|
| 813 |
+
*shMemSize = 0;
|
| 814 |
+
}
|
| 815 |
+
else if (size <= 8 * 1024) {
|
| 816 |
+
*shMemSize = 8 * 1024;
|
| 817 |
+
}
|
| 818 |
+
else if (size <= 16 * 1024) {
|
| 819 |
+
*shMemSize = 16 * 1024;
|
| 820 |
+
}
|
| 821 |
+
else if (size <= 32 * 1024) {
|
| 822 |
+
*shMemSize = 32 * 1024;
|
| 823 |
+
}
|
| 824 |
+
else if (size <= 64 * 1024) {
|
| 825 |
+
*shMemSize = 64 * 1024;
|
| 826 |
+
}
|
| 827 |
+
else if (size <= 100 * 1024) {
|
| 828 |
+
*shMemSize = 100 * 1024;
|
| 829 |
+
}
|
| 830 |
+
else if (size <= 132 * 1024) {
|
| 831 |
+
*shMemSize = 132 * 1024;
|
| 832 |
+
}
|
| 833 |
+
else if (size <= 164 * 1024) {
|
| 834 |
+
*shMemSize = 164 * 1024;
|
| 835 |
+
}
|
| 836 |
+
else {
|
| 837 |
+
return CUDA_OCC_ERROR_INVALID_INPUT;
|
| 838 |
+
}
|
| 839 |
+
}
|
| 840 |
+
else {
|
| 841 |
+
if (size == 0) {
|
| 842 |
+
*shMemSize = 0;
|
| 843 |
+
}
|
| 844 |
+
else if (size <= 8 * 1024) {
|
| 845 |
+
*shMemSize = 8 * 1024;
|
| 846 |
+
}
|
| 847 |
+
else if (size <= 16 * 1024) {
|
| 848 |
+
*shMemSize = 16 * 1024;
|
| 849 |
+
}
|
| 850 |
+
else if (size <= 32 * 1024) {
|
| 851 |
+
*shMemSize = 32 * 1024;
|
| 852 |
+
}
|
| 853 |
+
else if (size <= 64 * 1024) {
|
| 854 |
+
*shMemSize = 64 * 1024;
|
| 855 |
+
}
|
| 856 |
+
else if (size <= 100 * 1024) {
|
| 857 |
+
*shMemSize = 100 * 1024;
|
| 858 |
+
}
|
| 859 |
+
else {
|
| 860 |
+
return CUDA_OCC_ERROR_INVALID_INPUT;
|
| 861 |
+
}
|
| 862 |
+
}
|
| 863 |
+
break;
|
| 864 |
+
case 9: {
|
| 865 |
+
if (size == 0) {
|
| 866 |
+
*shMemSize = 0;
|
| 867 |
+
}
|
| 868 |
+
else if (size <= 8 * 1024) {
|
| 869 |
+
*shMemSize = 8 * 1024;
|
| 870 |
+
}
|
| 871 |
+
else if (size <= 16 * 1024) {
|
| 872 |
+
*shMemSize = 16 * 1024;
|
| 873 |
+
}
|
| 874 |
+
else if (size <= 32 * 1024) {
|
| 875 |
+
*shMemSize = 32 * 1024;
|
| 876 |
+
}
|
| 877 |
+
else if (size <= 64 * 1024) {
|
| 878 |
+
*shMemSize = 64 * 1024;
|
| 879 |
+
}
|
| 880 |
+
else if (size <= 100 * 1024) {
|
| 881 |
+
*shMemSize = 100 * 1024;
|
| 882 |
+
}
|
| 883 |
+
else if (size <= 132 * 1024) {
|
| 884 |
+
*shMemSize = 132 * 1024;
|
| 885 |
+
}
|
| 886 |
+
else if (size <= 164 * 1024) {
|
| 887 |
+
*shMemSize = 164 * 1024;
|
| 888 |
+
}
|
| 889 |
+
else if (size <= 196 * 1024) {
|
| 890 |
+
*shMemSize = 196 * 1024;
|
| 891 |
+
}
|
| 892 |
+
else if (size <= 228 * 1024) {
|
| 893 |
+
*shMemSize = 228 * 1024;
|
| 894 |
+
}
|
| 895 |
+
else {
|
| 896 |
+
return CUDA_OCC_ERROR_INVALID_INPUT;
|
| 897 |
+
}
|
| 898 |
+
break;
|
| 899 |
+
}
|
| 900 |
+
default:
|
| 901 |
+
return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
|
| 902 |
+
}
|
| 903 |
+
|
| 904 |
+
return CUDA_OCC_SUCCESS;
|
| 905 |
+
}
|
| 906 |
+
|
| 907 |
+
/**
|
| 908 |
+
* Shared memory based on the new carveoutConfig API introduced with Volta
|
| 909 |
+
*/
|
| 910 |
+
static __OCC_INLINE cudaOccError cudaOccSMemPreferenceVoltaPlus(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
|
| 911 |
+
{
|
| 912 |
+
cudaOccError status = CUDA_OCC_SUCCESS;
|
| 913 |
+
size_t preferenceShmemSize;
|
| 914 |
+
|
| 915 |
+
// CUDA 9.0 introduces a new API to set shared memory - L1 configuration on supported
|
| 916 |
+
// devices. This preference will take precedence over the older cacheConfig setting.
|
| 917 |
+
// Map cacheConfig to its effective preference value.
|
| 918 |
+
int effectivePreference = state->carveoutConfig;
|
| 919 |
+
if ((effectivePreference < SHAREDMEM_CARVEOUT_DEFAULT) || (effectivePreference > SHAREDMEM_CARVEOUT_MAX_SHARED)) {
|
| 920 |
+
return CUDA_OCC_ERROR_INVALID_INPUT;
|
| 921 |
+
}
|
| 922 |
+
|
| 923 |
+
if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) {
|
| 924 |
+
switch (state->cacheConfig)
|
| 925 |
+
{
|
| 926 |
+
case CACHE_PREFER_L1:
|
| 927 |
+
effectivePreference = SHAREDMEM_CARVEOUT_MAX_L1;
|
| 928 |
+
break;
|
| 929 |
+
case CACHE_PREFER_SHARED:
|
| 930 |
+
effectivePreference = SHAREDMEM_CARVEOUT_MAX_SHARED;
|
| 931 |
+
break;
|
| 932 |
+
case CACHE_PREFER_EQUAL:
|
| 933 |
+
effectivePreference = SHAREDMEM_CARVEOUT_HALF;
|
| 934 |
+
break;
|
| 935 |
+
default:
|
| 936 |
+
effectivePreference = SHAREDMEM_CARVEOUT_DEFAULT;
|
| 937 |
+
break;
|
| 938 |
+
}
|
| 939 |
+
}
|
| 940 |
+
|
| 941 |
+
if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) {
|
| 942 |
+
preferenceShmemSize = properties->sharedMemPerMultiprocessor;
|
| 943 |
+
}
|
| 944 |
+
else {
|
| 945 |
+
preferenceShmemSize = (size_t) (effectivePreference * properties->sharedMemPerMultiprocessor) / 100;
|
| 946 |
+
}
|
| 947 |
+
|
| 948 |
+
status = cudaOccAlignUpShmemSizeVoltaPlus(&preferenceShmemSize, properties);
|
| 949 |
+
*limit = preferenceShmemSize;
|
| 950 |
+
return status;
|
| 951 |
+
}
|
| 952 |
+
|
| 953 |
+
/**
|
| 954 |
+
* Shared memory based on the cacheConfig
|
| 955 |
+
*/
|
| 956 |
+
static __OCC_INLINE cudaOccError cudaOccSMemPreference(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
|
| 957 |
+
{
|
| 958 |
+
size_t bytes = 0;
|
| 959 |
+
size_t sharedMemPerMultiprocessorHigh = properties->sharedMemPerMultiprocessor;
|
| 960 |
+
cudaOccCacheConfig cacheConfig = state->cacheConfig;
|
| 961 |
+
|
| 962 |
+
// Kepler has shared L1 cache / shared memory, and support cache
|
| 963 |
+
// configuration to trade one for the other. These values are needed to
|
| 964 |
+
// calculate the correct shared memory size for user requested cache
|
| 965 |
+
// configuration.
|
| 966 |
+
//
|
| 967 |
+
size_t minCacheSize = 16384;
|
| 968 |
+
size_t maxCacheSize = 49152;
|
| 969 |
+
size_t cacheAndSharedTotal = sharedMemPerMultiprocessorHigh + minCacheSize;
|
| 970 |
+
size_t sharedMemPerMultiprocessorLow = cacheAndSharedTotal - maxCacheSize;
|
| 971 |
+
|
| 972 |
+
switch (properties->computeMajor) {
|
| 973 |
+
case 3:
|
| 974 |
+
// Kepler supports 16KB, 32KB, or 48KB partitions for L1. The rest
|
| 975 |
+
// is shared memory.
|
| 976 |
+
//
|
| 977 |
+
switch (cacheConfig) {
|
| 978 |
+
default :
|
| 979 |
+
case CACHE_PREFER_NONE:
|
| 980 |
+
case CACHE_PREFER_SHARED:
|
| 981 |
+
bytes = sharedMemPerMultiprocessorHigh;
|
| 982 |
+
break;
|
| 983 |
+
case CACHE_PREFER_L1:
|
| 984 |
+
bytes = sharedMemPerMultiprocessorLow;
|
| 985 |
+
break;
|
| 986 |
+
case CACHE_PREFER_EQUAL:
|
| 987 |
+
// Equal is the mid-point between high and low. It should be
|
| 988 |
+
// equivalent to low + 16KB.
|
| 989 |
+
//
|
| 990 |
+
bytes = (sharedMemPerMultiprocessorHigh + sharedMemPerMultiprocessorLow) / 2;
|
| 991 |
+
break;
|
| 992 |
+
}
|
| 993 |
+
break;
|
| 994 |
+
case 5:
|
| 995 |
+
case 6:
|
| 996 |
+
// Maxwell and Pascal have dedicated shared memory.
|
| 997 |
+
//
|
| 998 |
+
bytes = sharedMemPerMultiprocessorHigh;
|
| 999 |
+
break;
|
| 1000 |
+
default:
|
| 1001 |
+
return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
|
| 1002 |
+
}
|
| 1003 |
+
|
| 1004 |
+
*limit = bytes;
|
| 1005 |
+
|
| 1006 |
+
return CUDA_OCC_SUCCESS;
|
| 1007 |
+
}
|
| 1008 |
+
|
| 1009 |
+
/**
|
| 1010 |
+
* Shared memory based on config requested by User
|
| 1011 |
+
*/
|
| 1012 |
+
static __OCC_INLINE cudaOccError cudaOccSMemPerMultiprocessor(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
|
| 1013 |
+
{
|
| 1014 |
+
// Volta introduces a new API that allows for shared memory carveout preference. Because it is a shared memory preference,
|
| 1015 |
+
// it is handled separately from the cache config preference.
|
| 1016 |
+
if (properties->computeMajor >= 7) {
|
| 1017 |
+
return cudaOccSMemPreferenceVoltaPlus(limit, properties, state);
|
| 1018 |
+
}
|
| 1019 |
+
return cudaOccSMemPreference(limit, properties, state);
|
| 1020 |
+
}
|
| 1021 |
+
|
| 1022 |
+
/**
|
| 1023 |
+
* Return the per block shared memory limit based on function config
|
| 1024 |
+
*/
|
| 1025 |
+
static __OCC_INLINE cudaOccError cudaOccSMemPerBlock(size_t *limit, const cudaOccDeviceProp *properties, cudaOccFuncShmemConfig shmemLimitConfig, size_t smemPerCta)
|
| 1026 |
+
{
|
| 1027 |
+
switch (properties->computeMajor) {
|
| 1028 |
+
case 2:
|
| 1029 |
+
case 3:
|
| 1030 |
+
case 4:
|
| 1031 |
+
case 5:
|
| 1032 |
+
case 6:
|
| 1033 |
+
*limit = properties->sharedMemPerBlock;
|
| 1034 |
+
break;
|
| 1035 |
+
case 7:
|
| 1036 |
+
case 8:
|
| 1037 |
+
case 9:
|
| 1038 |
+
switch (shmemLimitConfig) {
|
| 1039 |
+
default:
|
| 1040 |
+
case FUNC_SHMEM_LIMIT_DEFAULT:
|
| 1041 |
+
*limit = properties->sharedMemPerBlock;
|
| 1042 |
+
break;
|
| 1043 |
+
case FUNC_SHMEM_LIMIT_OPTIN:
|
| 1044 |
+
if (smemPerCta > properties->sharedMemPerBlock) {
|
| 1045 |
+
*limit = properties->sharedMemPerBlockOptin;
|
| 1046 |
+
}
|
| 1047 |
+
else {
|
| 1048 |
+
*limit = properties->sharedMemPerBlock;
|
| 1049 |
+
}
|
| 1050 |
+
break;
|
| 1051 |
+
}
|
| 1052 |
+
break;
|
| 1053 |
+
default:
|
| 1054 |
+
return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
|
| 1055 |
+
}
|
| 1056 |
+
|
| 1057 |
+
// Starting Ampere, CUDA driver reserves additional shared memory per block
|
| 1058 |
+
if (properties->computeMajor >= 8) {
|
| 1059 |
+
*limit += properties->reservedSharedMemPerBlock;
|
| 1060 |
+
}
|
| 1061 |
+
|
| 1062 |
+
return CUDA_OCC_SUCCESS;
|
| 1063 |
+
}
|
| 1064 |
+
|
| 1065 |
+
/**
|
| 1066 |
+
* Partitioned global caching mode support
|
| 1067 |
+
*/
|
| 1068 |
+
static __OCC_INLINE cudaOccError cudaOccPartitionedGlobalCachingModeSupport(cudaOccPartitionedGCSupport *limit, const cudaOccDeviceProp *properties)
|
| 1069 |
+
{
|
| 1070 |
+
*limit = PARTITIONED_GC_NOT_SUPPORTED;
|
| 1071 |
+
|
| 1072 |
+
if ((properties->computeMajor == 5 && (properties->computeMinor == 2 || properties->computeMinor == 3)) ||
|
| 1073 |
+
properties->computeMajor == 6) {
|
| 1074 |
+
*limit = PARTITIONED_GC_SUPPORTED;
|
| 1075 |
+
}
|
| 1076 |
+
|
| 1077 |
+
if (properties->computeMajor == 6 && properties->computeMinor == 0) {
|
| 1078 |
+
*limit = PARTITIONED_GC_NOT_SUPPORTED;
|
| 1079 |
+
}
|
| 1080 |
+
|
| 1081 |
+
return CUDA_OCC_SUCCESS;
|
| 1082 |
+
}
|
| 1083 |
+
|
| 1084 |
+
///////////////////////////////////////////////
//    User Input Sanity                      //
///////////////////////////////////////////////
static __OCC_INLINE cudaOccError cudaOccDevicePropCheck(const cudaOccDeviceProp *properties)
{
    // Verify device properties: each hardware limit must be a positive
    // number. Compute capability itself is checked later, during the
    // occupancy calculation.
    int valid =
        properties->maxThreadsPerBlock          > 0 &&
        properties->maxThreadsPerMultiprocessor > 0 &&
        properties->regsPerBlock                > 0 &&
        properties->regsPerMultiprocessor       > 0 &&
        properties->warpSize                    > 0 &&
        properties->sharedMemPerBlock           > 0 &&
        properties->sharedMemPerMultiprocessor  > 0 &&
        properties->numSms                      > 0;

    return valid ? CUDA_OCC_SUCCESS : CUDA_OCC_ERROR_INVALID_INPUT;
}
static __OCC_INLINE cudaOccError cudaOccFuncAttributesCheck(const cudaOccFuncAttributes *attributes)
{
    // Verify function attributes. A kernel needs at least one thread per
    // block; zero registers is legal (the compiler may choose not to use any,
    // e.g. for empty kernels), but a negative count is not.
    if (attributes->maxThreadsPerBlock <= 0 || attributes->numRegs < 0) {
        return CUDA_OCC_ERROR_INVALID_INPUT;
    }

    return CUDA_OCC_SUCCESS;
}
static __OCC_INLINE cudaOccError cudaOccDeviceStateCheck(const cudaOccDeviceState *state)
{
    // Placeholder: no state fields are validated yet; every state is accepted.
    (void)state; // silence unused-variable warning

    return CUDA_OCC_SUCCESS;
}
static __OCC_INLINE cudaOccError cudaOccInputCheck(
    const cudaOccDeviceProp     *properties,
    const cudaOccFuncAttributes *attributes,
    const cudaOccDeviceState    *state)
{
    // Run all three sanity checks in order, stopping at the first failure.
    cudaOccError status = cudaOccDevicePropCheck(properties);

    if (status == CUDA_OCC_SUCCESS) {
        status = cudaOccFuncAttributesCheck(attributes);
    }
    if (status == CUDA_OCC_SUCCESS) {
        status = cudaOccDeviceStateCheck(state);
    }

    return status;
}
///////////////////////////////////////////////
//    Occupancy calculation Functions        //
///////////////////////////////////////////////
static __OCC_INLINE cudaOccPartitionedGCConfig cudaOccPartitionedGCExpected(
    const cudaOccDeviceProp     *properties,
    const cudaOccFuncAttributes *attributes)
{
    // Start from the function's requested caching config, then force it off
    // on hardware without partitioned global caching support.
    cudaOccPartitionedGCSupport gcSupport;

    cudaOccPartitionedGlobalCachingModeSupport(&gcSupport, properties);

    return (gcSupport == PARTITIONED_GC_NOT_SUPPORTED)
        ? PARTITIONED_GC_OFF
        : attributes->partitionedGCConfig;
}
// Warp limit
|
| 1181 |
+
//
|
| 1182 |
+
static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMWarpsLimit(
|
| 1183 |
+
int *limit,
|
| 1184 |
+
cudaOccPartitionedGCConfig gcConfig,
|
| 1185 |
+
const cudaOccDeviceProp *properties,
|
| 1186 |
+
const cudaOccFuncAttributes *attributes,
|
| 1187 |
+
int blockSize)
|
| 1188 |
+
{
|
| 1189 |
+
cudaOccError status = CUDA_OCC_SUCCESS;
|
| 1190 |
+
int maxWarpsPerSm;
|
| 1191 |
+
int warpsAllocatedPerCTA;
|
| 1192 |
+
int maxBlocks;
|
| 1193 |
+
(void)attributes; // silence unused-variable warning
|
| 1194 |
+
|
| 1195 |
+
if (blockSize > properties->maxThreadsPerBlock) {
|
| 1196 |
+
maxBlocks = 0;
|
| 1197 |
+
}
|
| 1198 |
+
else {
|
| 1199 |
+
maxWarpsPerSm = properties->maxThreadsPerMultiprocessor / properties->warpSize;
|
| 1200 |
+
warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize);
|
| 1201 |
+
maxBlocks = 0;
|
| 1202 |
+
|
| 1203 |
+
if (gcConfig != PARTITIONED_GC_OFF) {
|
| 1204 |
+
int maxBlocksPerSmPartition;
|
| 1205 |
+
int maxWarpsPerSmPartition;
|
| 1206 |
+
|
| 1207 |
+
// If partitioned global caching is on, then a CTA can only use a SM
|
| 1208 |
+
// partition (a half SM), and thus a half of the warp slots
|
| 1209 |
+
// available per SM
|
| 1210 |
+
//
|
| 1211 |
+
maxWarpsPerSmPartition = maxWarpsPerSm / 2;
|
| 1212 |
+
maxBlocksPerSmPartition = maxWarpsPerSmPartition / warpsAllocatedPerCTA;
|
| 1213 |
+
maxBlocks = maxBlocksPerSmPartition * 2;
|
| 1214 |
+
}
|
| 1215 |
+
// On hardware that supports partitioned global caching, each half SM is
|
| 1216 |
+
// guaranteed to support at least 32 warps (maximum number of warps of a
|
| 1217 |
+
// CTA), so caching will not cause 0 occupancy due to insufficient warp
|
| 1218 |
+
// allocation slots.
|
| 1219 |
+
//
|
| 1220 |
+
else {
|
| 1221 |
+
maxBlocks = maxWarpsPerSm / warpsAllocatedPerCTA;
|
| 1222 |
+
}
|
| 1223 |
+
}
|
| 1224 |
+
|
| 1225 |
+
*limit = maxBlocks;
|
| 1226 |
+
|
| 1227 |
+
return status;
|
| 1228 |
+
}
|
| 1229 |
+
|
| 1230 |
+
// Shared memory limit
|
| 1231 |
+
//
|
| 1232 |
+
static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMSmemLimit(
|
| 1233 |
+
int *limit,
|
| 1234 |
+
cudaOccResult *result,
|
| 1235 |
+
const cudaOccDeviceProp *properties,
|
| 1236 |
+
const cudaOccFuncAttributes *attributes,
|
| 1237 |
+
const cudaOccDeviceState *state,
|
| 1238 |
+
int blockSize,
|
| 1239 |
+
size_t dynamicSmemSize)
|
| 1240 |
+
{
|
| 1241 |
+
cudaOccError status = CUDA_OCC_SUCCESS;
|
| 1242 |
+
int allocationGranularity;
|
| 1243 |
+
size_t userSmemPreference = 0;
|
| 1244 |
+
size_t totalSmemUsagePerCTA;
|
| 1245 |
+
size_t maxSmemUsagePerCTA;
|
| 1246 |
+
size_t smemAllocatedPerCTA;
|
| 1247 |
+
size_t staticSmemSize;
|
| 1248 |
+
size_t sharedMemPerMultiprocessor;
|
| 1249 |
+
size_t smemLimitPerCTA;
|
| 1250 |
+
int maxBlocks;
|
| 1251 |
+
int dynamicSmemSizeExceeded = 0;
|
| 1252 |
+
int totalSmemSizeExceeded = 0;
|
| 1253 |
+
(void)blockSize; // silence unused-variable warning
|
| 1254 |
+
|
| 1255 |
+
status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties);
|
| 1256 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1257 |
+
return status;
|
| 1258 |
+
}
|
| 1259 |
+
|
| 1260 |
+
// Obtain the user preferred shared memory size. This setting is ignored if
|
| 1261 |
+
// user requests more shared memory than preferred.
|
| 1262 |
+
//
|
| 1263 |
+
status = cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state);
|
| 1264 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1265 |
+
return status;
|
| 1266 |
+
}
|
| 1267 |
+
|
| 1268 |
+
staticSmemSize = attributes->sharedSizeBytes + properties->reservedSharedMemPerBlock;
|
| 1269 |
+
totalSmemUsagePerCTA = staticSmemSize + dynamicSmemSize;
|
| 1270 |
+
smemAllocatedPerCTA = __occRoundUp((int)totalSmemUsagePerCTA, (int)allocationGranularity);
|
| 1271 |
+
|
| 1272 |
+
maxSmemUsagePerCTA = staticSmemSize + attributes->maxDynamicSharedSizeBytes;
|
| 1273 |
+
|
| 1274 |
+
dynamicSmemSizeExceeded = 0;
|
| 1275 |
+
totalSmemSizeExceeded = 0;
|
| 1276 |
+
|
| 1277 |
+
// Obtain the user set maximum dynamic size if it exists
|
| 1278 |
+
// If so, the current launch dynamic shared memory must not
|
| 1279 |
+
// exceed the set limit
|
| 1280 |
+
if (attributes->shmemLimitConfig != FUNC_SHMEM_LIMIT_DEFAULT &&
|
| 1281 |
+
dynamicSmemSize > attributes->maxDynamicSharedSizeBytes) {
|
| 1282 |
+
dynamicSmemSizeExceeded = 1;
|
| 1283 |
+
}
|
| 1284 |
+
|
| 1285 |
+
status = cudaOccSMemPerBlock(&smemLimitPerCTA, properties, attributes->shmemLimitConfig, maxSmemUsagePerCTA);
|
| 1286 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1287 |
+
return status;
|
| 1288 |
+
}
|
| 1289 |
+
|
| 1290 |
+
if (smemAllocatedPerCTA > smemLimitPerCTA) {
|
| 1291 |
+
totalSmemSizeExceeded = 1;
|
| 1292 |
+
}
|
| 1293 |
+
|
| 1294 |
+
if (dynamicSmemSizeExceeded || totalSmemSizeExceeded) {
|
| 1295 |
+
maxBlocks = 0;
|
| 1296 |
+
}
|
| 1297 |
+
else {
|
| 1298 |
+
// User requested shared memory limit is used as long as it is greater
|
| 1299 |
+
// than the total shared memory used per CTA, i.e. as long as at least
|
| 1300 |
+
// one CTA can be launched.
|
| 1301 |
+
if (userSmemPreference >= smemAllocatedPerCTA) {
|
| 1302 |
+
sharedMemPerMultiprocessor = userSmemPreference;
|
| 1303 |
+
}
|
| 1304 |
+
else {
|
| 1305 |
+
// On Volta+, user requested shared memory will limit occupancy
|
| 1306 |
+
// if it's less than shared memory per CTA. Otherwise, the
|
| 1307 |
+
// maximum shared memory limit is used.
|
| 1308 |
+
if (properties->computeMajor >= 7) {
|
| 1309 |
+
sharedMemPerMultiprocessor = smemAllocatedPerCTA;
|
| 1310 |
+
status = cudaOccAlignUpShmemSizeVoltaPlus(&sharedMemPerMultiprocessor, properties);
|
| 1311 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1312 |
+
return status;
|
| 1313 |
+
}
|
| 1314 |
+
}
|
| 1315 |
+
else {
|
| 1316 |
+
sharedMemPerMultiprocessor = properties->sharedMemPerMultiprocessor;
|
| 1317 |
+
}
|
| 1318 |
+
}
|
| 1319 |
+
|
| 1320 |
+
if (smemAllocatedPerCTA > 0) {
|
| 1321 |
+
maxBlocks = (int)(sharedMemPerMultiprocessor / smemAllocatedPerCTA);
|
| 1322 |
+
}
|
| 1323 |
+
else {
|
| 1324 |
+
maxBlocks = INT_MAX;
|
| 1325 |
+
}
|
| 1326 |
+
}
|
| 1327 |
+
|
| 1328 |
+
result->allocatedSharedMemPerBlock = smemAllocatedPerCTA;
|
| 1329 |
+
|
| 1330 |
+
*limit = maxBlocks;
|
| 1331 |
+
|
| 1332 |
+
return status;
|
| 1333 |
+
}
|
| 1334 |
+
|
| 1335 |
+
static __OCC_INLINE
|
| 1336 |
+
cudaOccError cudaOccMaxBlocksPerSMRegsLimit(
|
| 1337 |
+
int *limit,
|
| 1338 |
+
cudaOccPartitionedGCConfig *gcConfig,
|
| 1339 |
+
cudaOccResult *result,
|
| 1340 |
+
const cudaOccDeviceProp *properties,
|
| 1341 |
+
const cudaOccFuncAttributes *attributes,
|
| 1342 |
+
int blockSize)
|
| 1343 |
+
{
|
| 1344 |
+
cudaOccError status = CUDA_OCC_SUCCESS;
|
| 1345 |
+
int allocationGranularity;
|
| 1346 |
+
int warpsAllocatedPerCTA;
|
| 1347 |
+
int regsAllocatedPerCTA;
|
| 1348 |
+
int regsAssumedPerCTA;
|
| 1349 |
+
int regsPerWarp;
|
| 1350 |
+
int regsAllocatedPerWarp;
|
| 1351 |
+
int numSubPartitions;
|
| 1352 |
+
int numRegsPerSubPartition;
|
| 1353 |
+
int numWarpsPerSubPartition;
|
| 1354 |
+
int numWarpsPerSM;
|
| 1355 |
+
int maxBlocks;
|
| 1356 |
+
int maxRegsPerThread;
|
| 1357 |
+
|
| 1358 |
+
status = cudaOccRegAllocationGranularity(
|
| 1359 |
+
&allocationGranularity,
|
| 1360 |
+
properties);
|
| 1361 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1362 |
+
return status;
|
| 1363 |
+
}
|
| 1364 |
+
|
| 1365 |
+
status = cudaOccRegAllocationMaxPerThread(
|
| 1366 |
+
&maxRegsPerThread,
|
| 1367 |
+
properties);
|
| 1368 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1369 |
+
return status;
|
| 1370 |
+
}
|
| 1371 |
+
|
| 1372 |
+
status = cudaOccSubPartitionsPerMultiprocessor(&numSubPartitions, properties);
|
| 1373 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1374 |
+
return status;
|
| 1375 |
+
}
|
| 1376 |
+
|
| 1377 |
+
warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize);
|
| 1378 |
+
|
| 1379 |
+
// GPUs of compute capability 2.x and higher allocate registers to warps
|
| 1380 |
+
//
|
| 1381 |
+
// Number of regs per warp is regs per thread x warp size, rounded up to
|
| 1382 |
+
// register allocation granularity
|
| 1383 |
+
//
|
| 1384 |
+
regsPerWarp = attributes->numRegs * properties->warpSize;
|
| 1385 |
+
regsAllocatedPerWarp = __occRoundUp(regsPerWarp, allocationGranularity);
|
| 1386 |
+
regsAllocatedPerCTA = regsAllocatedPerWarp * warpsAllocatedPerCTA;
|
| 1387 |
+
|
| 1388 |
+
// Hardware verifies if a launch fits the per-CTA register limit. For
|
| 1389 |
+
// historical reasons, the verification logic assumes register
|
| 1390 |
+
// allocations are made to all partitions simultaneously. Therefore, to
|
| 1391 |
+
// simulate the hardware check, the warp allocation needs to be rounded
|
| 1392 |
+
// up to the number of partitions.
|
| 1393 |
+
//
|
| 1394 |
+
regsAssumedPerCTA = regsAllocatedPerWarp * __occRoundUp(warpsAllocatedPerCTA, numSubPartitions);
|
| 1395 |
+
|
| 1396 |
+
if (properties->regsPerBlock < regsAssumedPerCTA || // Hardware check
|
| 1397 |
+
properties->regsPerBlock < regsAllocatedPerCTA || // Software check
|
| 1398 |
+
attributes->numRegs > maxRegsPerThread) { // Per thread limit check
|
| 1399 |
+
maxBlocks = 0;
|
| 1400 |
+
}
|
| 1401 |
+
else {
|
| 1402 |
+
if (regsAllocatedPerWarp > 0) {
|
| 1403 |
+
// Registers are allocated in each sub-partition. The max number
|
| 1404 |
+
// of warps that can fit on an SM is equal to the max number of
|
| 1405 |
+
// warps per sub-partition x number of sub-partitions.
|
| 1406 |
+
//
|
| 1407 |
+
numRegsPerSubPartition = properties->regsPerMultiprocessor / numSubPartitions;
|
| 1408 |
+
numWarpsPerSubPartition = numRegsPerSubPartition / regsAllocatedPerWarp;
|
| 1409 |
+
|
| 1410 |
+
maxBlocks = 0;
|
| 1411 |
+
|
| 1412 |
+
if (*gcConfig != PARTITIONED_GC_OFF) {
|
| 1413 |
+
int numSubPartitionsPerSmPartition;
|
| 1414 |
+
int numWarpsPerSmPartition;
|
| 1415 |
+
int maxBlocksPerSmPartition;
|
| 1416 |
+
|
| 1417 |
+
// If partitioned global caching is on, then a CTA can only
|
| 1418 |
+
// use a half SM, and thus a half of the registers available
|
| 1419 |
+
// per SM
|
| 1420 |
+
//
|
| 1421 |
+
numSubPartitionsPerSmPartition = numSubPartitions / 2;
|
| 1422 |
+
numWarpsPerSmPartition = numWarpsPerSubPartition * numSubPartitionsPerSmPartition;
|
| 1423 |
+
maxBlocksPerSmPartition = numWarpsPerSmPartition / warpsAllocatedPerCTA;
|
| 1424 |
+
maxBlocks = maxBlocksPerSmPartition * 2;
|
| 1425 |
+
}
|
| 1426 |
+
|
| 1427 |
+
// Try again if partitioned global caching is not enabled, or if
|
| 1428 |
+
// the CTA cannot fit on the SM with caching on (maxBlocks == 0). In the latter
|
| 1429 |
+
// case, the device will automatically turn off caching, except
|
| 1430 |
+
// if the user forces enablement via PARTITIONED_GC_ON_STRICT to calculate
|
| 1431 |
+
// occupancy and launch configuration.
|
| 1432 |
+
//
|
| 1433 |
+
if (maxBlocks == 0 && *gcConfig != PARTITIONED_GC_ON_STRICT) {
|
| 1434 |
+
// In case *gcConfig was PARTITIONED_GC_ON flip it OFF since
|
| 1435 |
+
// this is what it will be if we spread CTA across partitions.
|
| 1436 |
+
//
|
| 1437 |
+
*gcConfig = PARTITIONED_GC_OFF;
|
| 1438 |
+
numWarpsPerSM = numWarpsPerSubPartition * numSubPartitions;
|
| 1439 |
+
maxBlocks = numWarpsPerSM / warpsAllocatedPerCTA;
|
| 1440 |
+
}
|
| 1441 |
+
}
|
| 1442 |
+
else {
|
| 1443 |
+
maxBlocks = INT_MAX;
|
| 1444 |
+
}
|
| 1445 |
+
}
|
| 1446 |
+
|
| 1447 |
+
|
| 1448 |
+
result->allocatedRegistersPerBlock = regsAllocatedPerCTA;
|
| 1449 |
+
|
| 1450 |
+
*limit = maxBlocks;
|
| 1451 |
+
|
| 1452 |
+
return status;
|
| 1453 |
+
}
|
| 1454 |
+
|
| 1455 |
+
// Barrier limit
|
| 1456 |
+
//
|
| 1457 |
+
static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMBlockBarrierLimit(
|
| 1458 |
+
int *limit,
|
| 1459 |
+
int ctaLimitBlocks,
|
| 1460 |
+
const cudaOccFuncAttributes *attributes)
|
| 1461 |
+
{
|
| 1462 |
+
cudaOccError status = CUDA_OCC_SUCCESS;
|
| 1463 |
+
int numBarriersAvailable = ctaLimitBlocks * 2;
|
| 1464 |
+
int numBarriersUsed = attributes->numBlockBarriers;
|
| 1465 |
+
int maxBlocks = INT_MAX;
|
| 1466 |
+
|
| 1467 |
+
if (numBarriersUsed) {
|
| 1468 |
+
maxBlocks = numBarriersAvailable / numBarriersUsed;
|
| 1469 |
+
}
|
| 1470 |
+
|
| 1471 |
+
*limit = maxBlocks;
|
| 1472 |
+
|
| 1473 |
+
return status;
|
| 1474 |
+
}
|
| 1475 |
+
|
| 1476 |
+
///////////////////////////////////
//      API Implementations      //
///////////////////////////////////
static __OCC_INLINE
|
| 1481 |
+
cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor(
|
| 1482 |
+
cudaOccResult *result,
|
| 1483 |
+
const cudaOccDeviceProp *properties,
|
| 1484 |
+
const cudaOccFuncAttributes *attributes,
|
| 1485 |
+
const cudaOccDeviceState *state,
|
| 1486 |
+
int blockSize,
|
| 1487 |
+
size_t dynamicSmemSize)
|
| 1488 |
+
{
|
| 1489 |
+
cudaOccError status = CUDA_OCC_SUCCESS;
|
| 1490 |
+
int ctaLimitWarps = 0;
|
| 1491 |
+
int ctaLimitBlocks = 0;
|
| 1492 |
+
int ctaLimitSMem = 0;
|
| 1493 |
+
int ctaLimitRegs = 0;
|
| 1494 |
+
int ctaLimitBars = 0;
|
| 1495 |
+
int ctaLimit = 0;
|
| 1496 |
+
unsigned int limitingFactors = 0;
|
| 1497 |
+
|
| 1498 |
+
cudaOccPartitionedGCConfig gcConfig = PARTITIONED_GC_OFF;
|
| 1499 |
+
|
| 1500 |
+
if (!result || !properties || !attributes || !state || blockSize <= 0) {
|
| 1501 |
+
return CUDA_OCC_ERROR_INVALID_INPUT;
|
| 1502 |
+
}
|
| 1503 |
+
|
| 1504 |
+
///////////////////////////
|
| 1505 |
+
// Check user input
|
| 1506 |
+
///////////////////////////
|
| 1507 |
+
|
| 1508 |
+
status = cudaOccInputCheck(properties, attributes, state);
|
| 1509 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1510 |
+
return status;
|
| 1511 |
+
}
|
| 1512 |
+
|
| 1513 |
+
///////////////////////////
|
| 1514 |
+
// Initialization
|
| 1515 |
+
///////////////////////////
|
| 1516 |
+
|
| 1517 |
+
gcConfig = cudaOccPartitionedGCExpected(properties, attributes);
|
| 1518 |
+
|
| 1519 |
+
///////////////////////////
|
| 1520 |
+
// Compute occupancy
|
| 1521 |
+
///////////////////////////
|
| 1522 |
+
|
| 1523 |
+
// Limits due to registers/SM
|
| 1524 |
+
// Also compute if partitioned global caching has to be turned off
|
| 1525 |
+
//
|
| 1526 |
+
status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegs, &gcConfig, result, properties, attributes, blockSize);
|
| 1527 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1528 |
+
return status;
|
| 1529 |
+
}
|
| 1530 |
+
|
| 1531 |
+
// SMs on GP100 (6.0) have 2 subpartitions, while those on GP10x have 4.
|
| 1532 |
+
// As a result, an SM on GP100 may be able to run more CTAs than the one on GP10x.
|
| 1533 |
+
// For forward compatibility within Pascal family, if a function cannot run on GP10x (maxBlock == 0),
|
| 1534 |
+
// we do not let it run on any Pascal processor, even though it may be able to run on GP100.
|
| 1535 |
+
// Therefore, we check the occupancy on GP10x when it can run on GP100
|
| 1536 |
+
//
|
| 1537 |
+
if (properties->computeMajor == 6 && properties->computeMinor == 0 && ctaLimitRegs) {
|
| 1538 |
+
cudaOccDeviceProp propertiesGP10x;
|
| 1539 |
+
cudaOccPartitionedGCConfig gcConfigGP10x = gcConfig;
|
| 1540 |
+
int ctaLimitRegsGP10x = 0;
|
| 1541 |
+
|
| 1542 |
+
// Set up properties for GP10x
|
| 1543 |
+
memcpy(&propertiesGP10x, properties, sizeof(propertiesGP10x));
|
| 1544 |
+
propertiesGP10x.computeMinor = 1;
|
| 1545 |
+
|
| 1546 |
+
status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegsGP10x, &gcConfigGP10x, result, &propertiesGP10x, attributes, blockSize);
|
| 1547 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1548 |
+
return status;
|
| 1549 |
+
}
|
| 1550 |
+
|
| 1551 |
+
if (ctaLimitRegsGP10x == 0) {
|
| 1552 |
+
ctaLimitRegs = 0;
|
| 1553 |
+
}
|
| 1554 |
+
}
|
| 1555 |
+
|
| 1556 |
+
// Limits due to warps/SM
|
| 1557 |
+
//
|
| 1558 |
+
status = cudaOccMaxBlocksPerSMWarpsLimit(&ctaLimitWarps, gcConfig, properties, attributes, blockSize);
|
| 1559 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1560 |
+
return status;
|
| 1561 |
+
}
|
| 1562 |
+
|
| 1563 |
+
// Limits due to blocks/SM
|
| 1564 |
+
//
|
| 1565 |
+
status = cudaOccMaxBlocksPerMultiprocessor(&ctaLimitBlocks, properties);
|
| 1566 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1567 |
+
return status;
|
| 1568 |
+
}
|
| 1569 |
+
|
| 1570 |
+
// Limits due to shared memory/SM
|
| 1571 |
+
//
|
| 1572 |
+
status = cudaOccMaxBlocksPerSMSmemLimit(&ctaLimitSMem, result, properties, attributes, state, blockSize, dynamicSmemSize);
|
| 1573 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1574 |
+
return status;
|
| 1575 |
+
}
|
| 1576 |
+
|
| 1577 |
+
///////////////////////////
|
| 1578 |
+
// Overall occupancy
|
| 1579 |
+
///////////////////////////
|
| 1580 |
+
|
| 1581 |
+
// Overall limit is min() of limits due to above reasons
|
| 1582 |
+
//
|
| 1583 |
+
ctaLimit = __occMin(ctaLimitRegs, __occMin(ctaLimitSMem, __occMin(ctaLimitWarps, ctaLimitBlocks)));
|
| 1584 |
+
|
| 1585 |
+
// Determine occupancy limiting factors
|
| 1586 |
+
//
|
| 1587 |
+
if (ctaLimit == ctaLimitWarps) {
|
| 1588 |
+
limitingFactors |= OCC_LIMIT_WARPS;
|
| 1589 |
+
}
|
| 1590 |
+
if (ctaLimit == ctaLimitRegs) {
|
| 1591 |
+
limitingFactors |= OCC_LIMIT_REGISTERS;
|
| 1592 |
+
}
|
| 1593 |
+
if (ctaLimit == ctaLimitSMem) {
|
| 1594 |
+
limitingFactors |= OCC_LIMIT_SHARED_MEMORY;
|
| 1595 |
+
}
|
| 1596 |
+
if (ctaLimit == ctaLimitBlocks) {
|
| 1597 |
+
limitingFactors |= OCC_LIMIT_BLOCKS;
|
| 1598 |
+
}
|
| 1599 |
+
|
| 1600 |
+
// For Hopper onwards compute the limits to occupancy based on block barrier count
|
| 1601 |
+
//
|
| 1602 |
+
if (properties->computeMajor >= 9 && attributes->numBlockBarriers > 0) {
|
| 1603 |
+
// Limits due to barrier/SM
|
| 1604 |
+
//
|
| 1605 |
+
status = cudaOccMaxBlocksPerSMBlockBarrierLimit(&ctaLimitBars, ctaLimitBlocks, attributes);
|
| 1606 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1607 |
+
return status;
|
| 1608 |
+
}
|
| 1609 |
+
|
| 1610 |
+
// Recompute overall limit based on barrier/SM
|
| 1611 |
+
//
|
| 1612 |
+
ctaLimit = __occMin(ctaLimitBars, ctaLimit);
|
| 1613 |
+
|
| 1614 |
+
// Determine if this is occupancy limiting factor
|
| 1615 |
+
//
|
| 1616 |
+
if (ctaLimit == ctaLimitBars) {
|
| 1617 |
+
limitingFactors |= OCC_LIMIT_BARRIERS;
|
| 1618 |
+
}
|
| 1619 |
+
}
|
| 1620 |
+
else {
|
| 1621 |
+
ctaLimitBars = INT_MAX;
|
| 1622 |
+
}
|
| 1623 |
+
|
| 1624 |
+
// Fill in the return values
|
| 1625 |
+
//
|
| 1626 |
+
result->limitingFactors = limitingFactors;
|
| 1627 |
+
|
| 1628 |
+
result->blockLimitRegs = ctaLimitRegs;
|
| 1629 |
+
result->blockLimitSharedMem = ctaLimitSMem;
|
| 1630 |
+
result->blockLimitWarps = ctaLimitWarps;
|
| 1631 |
+
result->blockLimitBlocks = ctaLimitBlocks;
|
| 1632 |
+
result->blockLimitBarriers = ctaLimitBars;
|
| 1633 |
+
result->partitionedGCConfig = gcConfig;
|
| 1634 |
+
|
| 1635 |
+
// Final occupancy
|
| 1636 |
+
result->activeBlocksPerMultiprocessor = ctaLimit;
|
| 1637 |
+
|
| 1638 |
+
return CUDA_OCC_SUCCESS;
|
| 1639 |
+
}
|
| 1640 |
+
|
| 1641 |
+
static __OCC_INLINE
|
| 1642 |
+
cudaOccError cudaOccAvailableDynamicSMemPerBlock(
|
| 1643 |
+
size_t *bytesAvailable,
|
| 1644 |
+
const cudaOccDeviceProp *properties,
|
| 1645 |
+
const cudaOccFuncAttributes *attributes,
|
| 1646 |
+
const cudaOccDeviceState *state,
|
| 1647 |
+
int numBlocks,
|
| 1648 |
+
int blockSize)
|
| 1649 |
+
{
|
| 1650 |
+
int allocationGranularity;
|
| 1651 |
+
size_t smemLimitPerBlock;
|
| 1652 |
+
size_t smemAvailableForDynamic;
|
| 1653 |
+
size_t userSmemPreference = 0;
|
| 1654 |
+
size_t sharedMemPerMultiprocessor;
|
| 1655 |
+
cudaOccResult result;
|
| 1656 |
+
cudaOccError status = CUDA_OCC_SUCCESS;
|
| 1657 |
+
|
| 1658 |
+
if (numBlocks <= 0)
|
| 1659 |
+
return CUDA_OCC_ERROR_INVALID_INPUT;
|
| 1660 |
+
|
| 1661 |
+
// First compute occupancy of potential kernel launch.
|
| 1662 |
+
//
|
| 1663 |
+
status = cudaOccMaxActiveBlocksPerMultiprocessor(&result, properties, attributes, state, blockSize, 0);
|
| 1664 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1665 |
+
return status;
|
| 1666 |
+
}
|
| 1667 |
+
// Check if occupancy is achievable given user requested number of blocks.
|
| 1668 |
+
//
|
| 1669 |
+
if (result.activeBlocksPerMultiprocessor < numBlocks) {
|
| 1670 |
+
return CUDA_OCC_ERROR_INVALID_INPUT;
|
| 1671 |
+
}
|
| 1672 |
+
|
| 1673 |
+
status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties);
|
| 1674 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1675 |
+
return status;
|
| 1676 |
+
}
|
| 1677 |
+
|
| 1678 |
+
// Return the per block shared memory limit based on function config.
|
| 1679 |
+
//
|
| 1680 |
+
status = cudaOccSMemPerBlock(&smemLimitPerBlock, properties, attributes->shmemLimitConfig, properties->sharedMemPerMultiprocessor);
|
| 1681 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1682 |
+
return status;
|
| 1683 |
+
}
|
| 1684 |
+
|
| 1685 |
+
// If there is only a single block needed per SM, then the user preference can be ignored and the fully SW
|
| 1686 |
+
// limit is allowed to be used as shared memory otherwise if more than one block is needed, then the user
|
| 1687 |
+
// preference sets the total limit of available shared memory.
|
| 1688 |
+
//
|
| 1689 |
+
cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state);
|
| 1690 |
+
if (numBlocks == 1) {
|
| 1691 |
+
sharedMemPerMultiprocessor = smemLimitPerBlock;
|
| 1692 |
+
}
|
| 1693 |
+
else {
|
| 1694 |
+
if (!userSmemPreference) {
|
| 1695 |
+
userSmemPreference = 1 ;
|
| 1696 |
+
status = cudaOccAlignUpShmemSizeVoltaPlus(&userSmemPreference, properties);
|
| 1697 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1698 |
+
return status;
|
| 1699 |
+
}
|
| 1700 |
+
}
|
| 1701 |
+
sharedMemPerMultiprocessor = userSmemPreference;
|
| 1702 |
+
}
|
| 1703 |
+
|
| 1704 |
+
// Compute total shared memory available per SM
|
| 1705 |
+
//
|
| 1706 |
+
smemAvailableForDynamic = sharedMemPerMultiprocessor / numBlocks;
|
| 1707 |
+
smemAvailableForDynamic = (smemAvailableForDynamic / allocationGranularity) * allocationGranularity;
|
| 1708 |
+
|
| 1709 |
+
// Cap shared memory
|
| 1710 |
+
//
|
| 1711 |
+
if (smemAvailableForDynamic > smemLimitPerBlock) {
|
| 1712 |
+
smemAvailableForDynamic = smemLimitPerBlock;
|
| 1713 |
+
}
|
| 1714 |
+
|
| 1715 |
+
// Now compute dynamic shared memory size
|
| 1716 |
+
smemAvailableForDynamic = smemAvailableForDynamic - attributes->sharedSizeBytes;
|
| 1717 |
+
|
| 1718 |
+
// Cap computed dynamic SM by user requested limit specified via cuFuncSetAttribute()
|
| 1719 |
+
//
|
| 1720 |
+
if (smemAvailableForDynamic > attributes->maxDynamicSharedSizeBytes)
|
| 1721 |
+
smemAvailableForDynamic = attributes->maxDynamicSharedSizeBytes;
|
| 1722 |
+
|
| 1723 |
+
*bytesAvailable = smemAvailableForDynamic;
|
| 1724 |
+
return CUDA_OCC_SUCCESS;
|
| 1725 |
+
}
|
| 1726 |
+
|
| 1727 |
+
static __OCC_INLINE
|
| 1728 |
+
cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
|
| 1729 |
+
int *minGridSize,
|
| 1730 |
+
int *blockSize,
|
| 1731 |
+
const cudaOccDeviceProp *properties,
|
| 1732 |
+
const cudaOccFuncAttributes *attributes,
|
| 1733 |
+
const cudaOccDeviceState *state,
|
| 1734 |
+
size_t (*blockSizeToDynamicSMemSize)(int),
|
| 1735 |
+
size_t dynamicSMemSize)
|
| 1736 |
+
{
|
| 1737 |
+
cudaOccError status = CUDA_OCC_SUCCESS;
|
| 1738 |
+
cudaOccResult result;
|
| 1739 |
+
|
| 1740 |
+
// Limits
|
| 1741 |
+
int occupancyLimit;
|
| 1742 |
+
int granularity;
|
| 1743 |
+
int blockSizeLimit;
|
| 1744 |
+
|
| 1745 |
+
// Recorded maximum
|
| 1746 |
+
int maxBlockSize = 0;
|
| 1747 |
+
int numBlocks = 0;
|
| 1748 |
+
int maxOccupancy = 0;
|
| 1749 |
+
|
| 1750 |
+
// Temporary
|
| 1751 |
+
int blockSizeToTryAligned;
|
| 1752 |
+
int blockSizeToTry;
|
| 1753 |
+
int blockSizeLimitAligned;
|
| 1754 |
+
int occupancyInBlocks;
|
| 1755 |
+
int occupancyInThreads;
|
| 1756 |
+
|
| 1757 |
+
///////////////////////////
|
| 1758 |
+
// Check user input
|
| 1759 |
+
///////////////////////////
|
| 1760 |
+
|
| 1761 |
+
if (!minGridSize || !blockSize || !properties || !attributes || !state) {
|
| 1762 |
+
return CUDA_OCC_ERROR_INVALID_INPUT;
|
| 1763 |
+
}
|
| 1764 |
+
|
| 1765 |
+
status = cudaOccInputCheck(properties, attributes, state);
|
| 1766 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1767 |
+
return status;
|
| 1768 |
+
}
|
| 1769 |
+
|
| 1770 |
+
/////////////////////////////////////////////////////////////////////////////////
|
| 1771 |
+
// Try each block size, and pick the block size with maximum occupancy
|
| 1772 |
+
/////////////////////////////////////////////////////////////////////////////////
|
| 1773 |
+
|
| 1774 |
+
occupancyLimit = properties->maxThreadsPerMultiprocessor;
|
| 1775 |
+
granularity = properties->warpSize;
|
| 1776 |
+
|
| 1777 |
+
blockSizeLimit = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock);
|
| 1778 |
+
blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity);
|
| 1779 |
+
|
| 1780 |
+
for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) {
|
| 1781 |
+
blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned);
|
| 1782 |
+
|
| 1783 |
+
// Ignore dynamicSMemSize if the user provides a mapping
|
| 1784 |
+
//
|
| 1785 |
+
if (blockSizeToDynamicSMemSize) {
|
| 1786 |
+
dynamicSMemSize = (*blockSizeToDynamicSMemSize)(blockSizeToTry);
|
| 1787 |
+
}
|
| 1788 |
+
|
| 1789 |
+
status = cudaOccMaxActiveBlocksPerMultiprocessor(
|
| 1790 |
+
&result,
|
| 1791 |
+
properties,
|
| 1792 |
+
attributes,
|
| 1793 |
+
state,
|
| 1794 |
+
blockSizeToTry,
|
| 1795 |
+
dynamicSMemSize);
|
| 1796 |
+
|
| 1797 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1798 |
+
return status;
|
| 1799 |
+
}
|
| 1800 |
+
|
| 1801 |
+
occupancyInBlocks = result.activeBlocksPerMultiprocessor;
|
| 1802 |
+
occupancyInThreads = blockSizeToTry * occupancyInBlocks;
|
| 1803 |
+
|
| 1804 |
+
if (occupancyInThreads > maxOccupancy) {
|
| 1805 |
+
maxBlockSize = blockSizeToTry;
|
| 1806 |
+
numBlocks = occupancyInBlocks;
|
| 1807 |
+
maxOccupancy = occupancyInThreads;
|
| 1808 |
+
}
|
| 1809 |
+
|
| 1810 |
+
// Early out if we have reached the maximum
|
| 1811 |
+
//
|
| 1812 |
+
if (occupancyLimit == maxOccupancy) {
|
| 1813 |
+
break;
|
| 1814 |
+
}
|
| 1815 |
+
}
|
| 1816 |
+
|
| 1817 |
+
///////////////////////////
|
| 1818 |
+
// Return best available
|
| 1819 |
+
///////////////////////////
|
| 1820 |
+
|
| 1821 |
+
// Suggested min grid size to achieve a full machine launch
|
| 1822 |
+
//
|
| 1823 |
+
*minGridSize = numBlocks * properties->numSms;
|
| 1824 |
+
*blockSize = maxBlockSize;
|
| 1825 |
+
|
| 1826 |
+
return status;
|
| 1827 |
+
}
|
| 1828 |
+
|
| 1829 |
+
|
| 1830 |
+
#if defined(__cplusplus)
|
| 1831 |
+
|
| 1832 |
+
namespace {
|
| 1833 |
+
|
| 1834 |
+
__OCC_INLINE
|
| 1835 |
+
cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
|
| 1836 |
+
int *minGridSize,
|
| 1837 |
+
int *blockSize,
|
| 1838 |
+
const cudaOccDeviceProp *properties,
|
| 1839 |
+
const cudaOccFuncAttributes *attributes,
|
| 1840 |
+
const cudaOccDeviceState *state,
|
| 1841 |
+
size_t dynamicSMemSize)
|
| 1842 |
+
{
|
| 1843 |
+
return cudaOccMaxPotentialOccupancyBlockSize(
|
| 1844 |
+
minGridSize,
|
| 1845 |
+
blockSize,
|
| 1846 |
+
properties,
|
| 1847 |
+
attributes,
|
| 1848 |
+
state,
|
| 1849 |
+
NULL,
|
| 1850 |
+
dynamicSMemSize);
|
| 1851 |
+
}
|
| 1852 |
+
|
| 1853 |
+
template <typename UnaryFunction>
|
| 1854 |
+
__OCC_INLINE
|
| 1855 |
+
cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem(
|
| 1856 |
+
int *minGridSize,
|
| 1857 |
+
int *blockSize,
|
| 1858 |
+
const cudaOccDeviceProp *properties,
|
| 1859 |
+
const cudaOccFuncAttributes *attributes,
|
| 1860 |
+
const cudaOccDeviceState *state,
|
| 1861 |
+
UnaryFunction blockSizeToDynamicSMemSize)
|
| 1862 |
+
{
|
| 1863 |
+
cudaOccError status = CUDA_OCC_SUCCESS;
|
| 1864 |
+
cudaOccResult result;
|
| 1865 |
+
|
| 1866 |
+
// Limits
|
| 1867 |
+
int occupancyLimit;
|
| 1868 |
+
int granularity;
|
| 1869 |
+
int blockSizeLimit;
|
| 1870 |
+
|
| 1871 |
+
// Recorded maximum
|
| 1872 |
+
int maxBlockSize = 0;
|
| 1873 |
+
int numBlocks = 0;
|
| 1874 |
+
int maxOccupancy = 0;
|
| 1875 |
+
|
| 1876 |
+
// Temporary
|
| 1877 |
+
int blockSizeToTryAligned;
|
| 1878 |
+
int blockSizeToTry;
|
| 1879 |
+
int blockSizeLimitAligned;
|
| 1880 |
+
int occupancyInBlocks;
|
| 1881 |
+
int occupancyInThreads;
|
| 1882 |
+
size_t dynamicSMemSize;
|
| 1883 |
+
|
| 1884 |
+
///////////////////////////
|
| 1885 |
+
// Check user input
|
| 1886 |
+
///////////////////////////
|
| 1887 |
+
|
| 1888 |
+
if (!minGridSize || !blockSize || !properties || !attributes || !state) {
|
| 1889 |
+
return CUDA_OCC_ERROR_INVALID_INPUT;
|
| 1890 |
+
}
|
| 1891 |
+
|
| 1892 |
+
status = cudaOccInputCheck(properties, attributes, state);
|
| 1893 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1894 |
+
return status;
|
| 1895 |
+
}
|
| 1896 |
+
|
| 1897 |
+
/////////////////////////////////////////////////////////////////////////////////
|
| 1898 |
+
// Try each block size, and pick the block size with maximum occupancy
|
| 1899 |
+
/////////////////////////////////////////////////////////////////////////////////
|
| 1900 |
+
|
| 1901 |
+
occupancyLimit = properties->maxThreadsPerMultiprocessor;
|
| 1902 |
+
granularity = properties->warpSize;
|
| 1903 |
+
blockSizeLimit = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock);
|
| 1904 |
+
blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity);
|
| 1905 |
+
|
| 1906 |
+
for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) {
|
| 1907 |
+
blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned);
|
| 1908 |
+
|
| 1909 |
+
dynamicSMemSize = blockSizeToDynamicSMemSize(blockSizeToTry);
|
| 1910 |
+
|
| 1911 |
+
status = cudaOccMaxActiveBlocksPerMultiprocessor(
|
| 1912 |
+
&result,
|
| 1913 |
+
properties,
|
| 1914 |
+
attributes,
|
| 1915 |
+
state,
|
| 1916 |
+
blockSizeToTry,
|
| 1917 |
+
dynamicSMemSize);
|
| 1918 |
+
|
| 1919 |
+
if (status != CUDA_OCC_SUCCESS) {
|
| 1920 |
+
return status;
|
| 1921 |
+
}
|
| 1922 |
+
|
| 1923 |
+
occupancyInBlocks = result.activeBlocksPerMultiprocessor;
|
| 1924 |
+
|
| 1925 |
+
occupancyInThreads = blockSizeToTry * occupancyInBlocks;
|
| 1926 |
+
|
| 1927 |
+
if (occupancyInThreads > maxOccupancy) {
|
| 1928 |
+
maxBlockSize = blockSizeToTry;
|
| 1929 |
+
numBlocks = occupancyInBlocks;
|
| 1930 |
+
maxOccupancy = occupancyInThreads;
|
| 1931 |
+
}
|
| 1932 |
+
|
| 1933 |
+
// Early out if we have reached the maximum
|
| 1934 |
+
//
|
| 1935 |
+
if (occupancyLimit == maxOccupancy) {
|
| 1936 |
+
break;
|
| 1937 |
+
}
|
| 1938 |
+
}
|
| 1939 |
+
|
| 1940 |
+
///////////////////////////
|
| 1941 |
+
// Return best available
|
| 1942 |
+
///////////////////////////
|
| 1943 |
+
|
| 1944 |
+
// Suggested min grid size to achieve a full machine launch
|
| 1945 |
+
//
|
| 1946 |
+
*minGridSize = numBlocks * properties->numSms;
|
| 1947 |
+
*blockSize = maxBlockSize;
|
| 1948 |
+
|
| 1949 |
+
return status;
|
| 1950 |
+
}
|
| 1951 |
+
|
| 1952 |
+
} // namespace anonymous
|
| 1953 |
+
|
| 1954 |
+
#endif /*__cplusplus */
|
| 1955 |
+
|
| 1956 |
+
#undef __OCC_INLINE
|
| 1957 |
+
|
| 1958 |
+
#endif /*__cuda_occupancy_h__*/
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_VDPAU_INTEROP_H__)
|
| 51 |
+
#define __CUDA_VDPAU_INTEROP_H__
|
| 52 |
+
|
| 53 |
+
#include "cuda_runtime_api.h"
|
| 54 |
+
|
| 55 |
+
#include <vdpau/vdpau.h>
|
| 56 |
+
|
| 57 |
+
#if defined(__cplusplus)
|
| 58 |
+
extern "C" {
|
| 59 |
+
#endif /* __cplusplus */
|
| 60 |
+
|
| 61 |
+
/**
|
| 62 |
+
* \addtogroup CUDART_VDPAU VDPAU Interoperability
|
| 63 |
+
* This section describes the VDPAU interoperability functions of the CUDA
|
| 64 |
+
* runtime application programming interface.
|
| 65 |
+
*
|
| 66 |
+
* @{
|
| 67 |
+
*/
|
| 68 |
+
|
| 69 |
+
/**
|
| 70 |
+
* \brief Gets the CUDA device associated with a VdpDevice.
|
| 71 |
+
*
|
| 72 |
+
* Returns the CUDA device associated with a VdpDevice, if applicable.
|
| 73 |
+
*
|
| 74 |
+
* \param device - Returns the device associated with vdpDevice, or -1 if
|
| 75 |
+
* the device associated with vdpDevice is not a compute device.
|
| 76 |
+
* \param vdpDevice - A VdpDevice handle
|
| 77 |
+
* \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
|
| 78 |
+
*
|
| 79 |
+
* \return
|
| 80 |
+
* ::cudaSuccess
|
| 81 |
+
* \notefnerr
|
| 82 |
+
*
|
| 83 |
+
* \sa
|
| 84 |
+
* ::cudaVDPAUSetVDPAUDevice,
|
| 85 |
+
* ::cuVDPAUGetDevice
|
| 86 |
+
*/
|
| 87 |
+
extern __host__ cudaError_t CUDARTAPI cudaVDPAUGetDevice(int *device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
|
| 88 |
+
|
| 89 |
+
/**
|
| 90 |
+
* \brief Sets a CUDA device to use VDPAU interoperability
|
| 91 |
+
*
|
| 92 |
+
* Records \p vdpDevice as the VdpDevice for VDPAU interoperability
|
| 93 |
+
* with the CUDA device \p device and sets \p device as the current
|
| 94 |
+
* device for the calling host thread.
|
| 95 |
+
*
|
| 96 |
+
* This function will immediately initialize the primary context on
|
| 97 |
+
* \p device if needed.
|
| 98 |
+
*
|
| 99 |
+
* If \p device has already been initialized then this call will fail
|
| 100 |
+
* with the error ::cudaErrorSetOnActiveProcess. In this case it is
|
| 101 |
+
* necessary to reset \p device using ::cudaDeviceReset() before
|
| 102 |
+
* VDPAU interoperability on \p device may be enabled.
|
| 103 |
+
*
|
| 104 |
+
* \param device - Device to use for VDPAU interoperability
|
| 105 |
+
* \param vdpDevice - The VdpDevice to interoperate with
|
| 106 |
+
* \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
|
| 107 |
+
*
|
| 108 |
+
* \return
|
| 109 |
+
* ::cudaSuccess,
|
| 110 |
+
* ::cudaErrorInvalidDevice,
|
| 111 |
+
* ::cudaErrorSetOnActiveProcess
|
| 112 |
+
* \notefnerr
|
| 113 |
+
*
|
| 114 |
+
* \sa ::cudaGraphicsVDPAURegisterVideoSurface,
|
| 115 |
+
* ::cudaGraphicsVDPAURegisterOutputSurface,
|
| 116 |
+
* ::cudaDeviceReset
|
| 117 |
+
*/
|
| 118 |
+
extern __host__ cudaError_t CUDARTAPI cudaVDPAUSetVDPAUDevice(int device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
|
| 119 |
+
|
| 120 |
+
/**
|
| 121 |
+
* \brief Register a VdpVideoSurface object
|
| 122 |
+
*
|
| 123 |
+
* Registers the VdpVideoSurface specified by \p vdpSurface for access by CUDA.
|
| 124 |
+
* A handle to the registered object is returned as \p resource.
|
| 125 |
+
* The surface's intended usage is specified using \p flags, as follows:
|
| 126 |
+
*
|
| 127 |
+
* - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this
|
| 128 |
+
* resource will be used. It is therefore assumed that this resource will be
|
| 129 |
+
* read from and written to by CUDA. This is the default value.
|
| 130 |
+
* - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA
|
| 131 |
+
* will not write to this resource.
|
| 132 |
+
* - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that
|
| 133 |
+
* CUDA will not read from this resource and will write over the
|
| 134 |
+
* entire contents of the resource, so none of the data previously
|
| 135 |
+
* stored in the resource will be preserved.
|
| 136 |
+
*
|
| 137 |
+
* \param resource - Pointer to the returned object handle
|
| 138 |
+
* \param vdpSurface - VDPAU object to be registered
|
| 139 |
+
* \param flags - Map flags
|
| 140 |
+
*
|
| 141 |
+
* \return
|
| 142 |
+
* ::cudaSuccess,
|
| 143 |
+
* ::cudaErrorInvalidDevice,
|
| 144 |
+
* ::cudaErrorInvalidValue,
|
| 145 |
+
* ::cudaErrorInvalidResourceHandle,
|
| 146 |
+
* ::cudaErrorUnknown
|
| 147 |
+
* \notefnerr
|
| 148 |
+
*
|
| 149 |
+
* \sa
|
| 150 |
+
* ::cudaVDPAUSetVDPAUDevice,
|
| 151 |
+
* ::cudaGraphicsUnregisterResource,
|
| 152 |
+
* ::cudaGraphicsSubResourceGetMappedArray,
|
| 153 |
+
* ::cuGraphicsVDPAURegisterVideoSurface
|
| 154 |
+
*/
|
| 155 |
+
extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterVideoSurface(struct cudaGraphicsResource **resource, VdpVideoSurface vdpSurface, unsigned int flags);
|
| 156 |
+
|
| 157 |
+
/**
|
| 158 |
+
* \brief Register a VdpOutputSurface object
|
| 159 |
+
*
|
| 160 |
+
* Registers the VdpOutputSurface specified by \p vdpSurface for access by CUDA.
|
| 161 |
+
* A handle to the registered object is returned as \p resource.
|
| 162 |
+
* The surface's intended usage is specified using \p flags, as follows:
|
| 163 |
+
*
|
| 164 |
+
* - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this
|
| 165 |
+
* resource will be used. It is therefore assumed that this resource will be
|
| 166 |
+
* read from and written to by CUDA. This is the default value.
|
| 167 |
+
* - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA
|
| 168 |
+
* will not write to this resource.
|
| 169 |
+
* - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that
|
| 170 |
+
* CUDA will not read from this resource and will write over the
|
| 171 |
+
* entire contents of the resource, so none of the data previously
|
| 172 |
+
* stored in the resource will be preserved.
|
| 173 |
+
*
|
| 174 |
+
* \param resource - Pointer to the returned object handle
|
| 175 |
+
* \param vdpSurface - VDPAU object to be registered
|
| 176 |
+
* \param flags - Map flags
|
| 177 |
+
*
|
| 178 |
+
* \return
|
| 179 |
+
* ::cudaSuccess,
|
| 180 |
+
* ::cudaErrorInvalidDevice,
|
| 181 |
+
* ::cudaErrorInvalidValue,
|
| 182 |
+
* ::cudaErrorInvalidResourceHandle,
|
| 183 |
+
* ::cudaErrorUnknown
|
| 184 |
+
* \notefnerr
|
| 185 |
+
*
|
| 186 |
+
* \sa
|
| 187 |
+
* ::cudaVDPAUSetVDPAUDevice,
|
| 188 |
+
* ::cudaGraphicsUnregisterResource,
|
| 189 |
+
* ::cudaGraphicsSubResourceGetMappedArray,
|
| 190 |
+
* ::cuGraphicsVDPAURegisterOutputSurface
|
| 191 |
+
*/
|
| 192 |
+
extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterOutputSurface(struct cudaGraphicsResource **resource, VdpOutputSurface vdpSurface, unsigned int flags);
|
| 193 |
+
|
| 194 |
+
/** @} */ /* END CUDART_VDPAU */
|
| 195 |
+
|
| 196 |
+
#if defined(__cplusplus)
|
| 197 |
+
}
|
| 198 |
+
#endif /* __cplusplus */
|
| 199 |
+
|
| 200 |
+
#endif /* __CUDA_VDPAU_INTEROP_H__ */
|
| 201 |
+
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2016 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#ifndef __CUDART_PLATFORM_H__
|
| 51 |
+
#define __CUDART_PLATFORM_H__
|
| 52 |
+
|
| 53 |
+
#if ((defined(__linux__) || defined(__QNX__)) && (defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)))
|
| 54 |
+
#define isEglSupported 1
|
| 55 |
+
#endif
|
| 56 |
+
|
| 57 |
+
#endif
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_atomic_functions.hpp
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__DEVICE_ATOMIC_FUNCTIONS_HPP__)
|
| 51 |
+
#define __DEVICE_ATOMIC_FUNCTIONS_HPP__
|
| 52 |
+
|
| 53 |
+
#if defined(__CUDACC_RTC__)
|
| 54 |
+
#define __DEVICE_ATOMIC_FUNCTIONS_DECL__ __device__
|
| 55 |
+
#else /* __CUDACC_RTC__ */
|
| 56 |
+
#define __DEVICE_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
|
| 57 |
+
#endif /* __CUDACC_RTC__ */
|
| 58 |
+
|
| 59 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 60 |
+
|
| 61 |
+
/*******************************************************************************
|
| 62 |
+
* *
|
| 63 |
+
* *
|
| 64 |
+
* *
|
| 65 |
+
*******************************************************************************/
|
| 66 |
+
|
| 67 |
+
#include "cuda_runtime_api.h"
|
| 68 |
+
|
| 69 |
+
/*******************************************************************************
|
| 70 |
+
* *
|
| 71 |
+
* *
|
| 72 |
+
* *
|
| 73 |
+
*******************************************************************************/
|
| 74 |
+
|
| 75 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAdd(int *address, int val)
|
| 76 |
+
{
|
| 77 |
+
return __iAtomicAdd(address, val);
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAdd(unsigned int *address, unsigned int val)
|
| 81 |
+
{
|
| 82 |
+
return __uAtomicAdd(address, val);
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicSub(int *address, int val)
|
| 86 |
+
{
|
| 87 |
+
return __iAtomicAdd(address, (unsigned int)-(int)val);
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicSub(unsigned int *address, unsigned int val)
|
| 91 |
+
{
|
| 92 |
+
return __uAtomicAdd(address, (unsigned int)-(int)val);
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicExch(int *address, int val)
|
| 96 |
+
{
|
| 97 |
+
return __iAtomicExch(address, val);
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicExch(unsigned int *address, unsigned int val)
|
| 101 |
+
{
|
| 102 |
+
return __uAtomicExch(address, val);
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ float atomicExch(float *address, float val)
|
| 106 |
+
{
|
| 107 |
+
return __fAtomicExch(address, val);
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMin(int *address, int val)
|
| 111 |
+
{
|
| 112 |
+
return __iAtomicMin(address, val);
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMin(unsigned int *address, unsigned int val)
|
| 116 |
+
{
|
| 117 |
+
return __uAtomicMin(address, val);
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicMax(int *address, int val)
|
| 121 |
+
{
|
| 122 |
+
return __iAtomicMax(address, val);
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicMax(unsigned int *address, unsigned int val)
|
| 126 |
+
{
|
| 127 |
+
return __uAtomicMax(address, val);
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicInc(unsigned int *address, unsigned int val)
|
| 131 |
+
{
|
| 132 |
+
return __uAtomicInc(address, val);
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicDec(unsigned int *address, unsigned int val)
|
| 136 |
+
{
|
| 137 |
+
return __uAtomicDec(address, val);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicAnd(int *address, int val)
|
| 141 |
+
{
|
| 142 |
+
return __iAtomicAnd(address, val);
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicAnd(unsigned int *address, unsigned int val)
|
| 146 |
+
{
|
| 147 |
+
return __uAtomicAnd(address, val);
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicOr(int *address, int val)
|
| 151 |
+
{
|
| 152 |
+
return __iAtomicOr(address, val);
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicOr(unsigned int *address, unsigned int val)
|
| 156 |
+
{
|
| 157 |
+
return __uAtomicOr(address, val);
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicXor(int *address, int val)
|
| 161 |
+
{
|
| 162 |
+
return __iAtomicXor(address, val);
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicXor(unsigned int *address, unsigned int val)
|
| 166 |
+
{
|
| 167 |
+
return __uAtomicXor(address, val);
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ int atomicCAS(int *address, int compare, int val)
|
| 171 |
+
{
|
| 172 |
+
return __iAtomicCAS(address, compare, val);
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned int atomicCAS(unsigned int *address, unsigned int compare, unsigned int val)
|
| 176 |
+
{
|
| 177 |
+
return __uAtomicCAS(address, compare, val);
|
| 178 |
+
}
|
| 179 |
+
|
| 180 |
+
/*******************************************************************************
|
| 181 |
+
* *
|
| 182 |
+
* *
|
| 183 |
+
* *
|
| 184 |
+
*******************************************************************************/
|
| 185 |
+
|
| 186 |
+
#include "cuda_runtime_api.h"
|
| 187 |
+
|
| 188 |
+
/*******************************************************************************
|
| 189 |
+
* *
|
| 190 |
+
* *
|
| 191 |
+
* *
|
| 192 |
+
*******************************************************************************/
|
| 193 |
+
|
| 194 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val)
|
| 195 |
+
{
|
| 196 |
+
return __ullAtomicAdd(address, val);
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicExch(unsigned long long int *address, unsigned long long int val)
|
| 200 |
+
{
|
| 201 |
+
return __ullAtomicExch(address, val);
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ unsigned long long int atomicCAS(unsigned long long int *address, unsigned long long int compare, unsigned long long int val)
|
| 205 |
+
{
|
| 206 |
+
return __ullAtomicCAS(address, compare, val);
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ bool any(bool cond)
|
| 210 |
+
{
|
| 211 |
+
return (bool)__any((int)cond);
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
__DEVICE_ATOMIC_FUNCTIONS_DECL__ bool all(bool cond)
|
| 215 |
+
{
|
| 216 |
+
return (bool)__all((int)cond);
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 220 |
+
|
| 221 |
+
#undef __DEVICE_ATOMIC_FUNCTIONS_DECL__
|
| 222 |
+
|
| 223 |
+
#endif /* !__DEVICE_ATOMIC_FUNCTIONS_HPP__ */
|
| 224 |
+
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_functions.h
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 51 |
+
#if defined(_MSC_VER)
|
| 52 |
+
#pragma message("device_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| 53 |
+
#else
|
| 54 |
+
#warning "device_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| 55 |
+
#endif
|
| 56 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 57 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
#include "crt/device_functions.h"
|
| 61 |
+
|
| 62 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__)
|
| 63 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 64 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_H_WRAPPER__
|
| 65 |
+
#endif
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_types.h
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__DEVICE_TYPES_H__)
|
| 51 |
+
#define __DEVICE_TYPES_H__
|
| 52 |
+
|
| 53 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 54 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 55 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__
|
| 56 |
+
#endif
|
| 57 |
+
|
| 58 |
+
#ifndef __DOXYGEN_ONLY__
|
| 59 |
+
#include "crt/host_defines.h"
|
| 60 |
+
#endif
|
| 61 |
+
|
| 62 |
+
/*******************************************************************************
|
| 63 |
+
* *
|
| 64 |
+
* *
|
| 65 |
+
* *
|
| 66 |
+
*******************************************************************************/
|
| 67 |
+
|
| 68 |
+
enum __device_builtin__ cudaRoundMode
|
| 69 |
+
{
|
| 70 |
+
cudaRoundNearest,
|
| 71 |
+
cudaRoundZero,
|
| 72 |
+
cudaRoundPosInf,
|
| 73 |
+
cudaRoundMinInf
|
| 74 |
+
};
|
| 75 |
+
|
| 76 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__)
|
| 77 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 78 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_TYPES_H__
|
| 79 |
+
#endif
|
| 80 |
+
|
| 81 |
+
#endif /* !__DEVICE_TYPES_H__ */
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_defines.h
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 51 |
+
#if defined(_MSC_VER)
|
| 52 |
+
#pragma message("host_defines.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| 53 |
+
#else
|
| 54 |
+
#warning "host_defines.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| 55 |
+
#endif
|
| 56 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 57 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
#include "crt/host_defines.h"
|
| 61 |
+
|
| 62 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__)
|
| 63 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 64 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H_WRAPPER__
|
| 65 |
+
#endif
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_functions.h
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 51 |
+
#if defined(_MSC_VER)
|
| 52 |
+
#pragma message("math_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| 53 |
+
#else
|
| 54 |
+
#warning "math_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| 55 |
+
#endif
|
| 56 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 57 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H_WRAPPER__
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
#include "crt/math_functions.h"
|
| 61 |
+
|
| 62 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H_WRAPPER__)
|
| 63 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 64 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H_WRAPPER__
|
| 65 |
+
#endif
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/mma.h
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 51 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 52 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__
|
| 53 |
+
#endif
|
| 54 |
+
|
| 55 |
+
#include "crt/mma.h"
|
| 56 |
+
|
| 57 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__)
|
| 58 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 59 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__
|
| 60 |
+
#endif
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.h
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__SM_20_ATOMIC_FUNCTIONS_H__)
|
| 51 |
+
#define __SM_20_ATOMIC_FUNCTIONS_H__
|
| 52 |
+
|
| 53 |
+
#if defined(__CUDACC_RTC__)
|
| 54 |
+
#define __SM_20_ATOMIC_FUNCTIONS_DECL__ __device__
|
| 55 |
+
#elif defined(_NVHPC_CUDA)
|
| 56 |
+
#define __SM_20_ATOMIC_FUNCTIONS_DECL__ extern __device__ __cudart_builtin__
|
| 57 |
+
#else /* __CUDACC_RTC__ */
|
| 58 |
+
#define __SM_20_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
|
| 59 |
+
#endif /* __CUDACC_RTC__ */
|
| 60 |
+
|
| 61 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 62 |
+
|
| 63 |
+
/*******************************************************************************
|
| 64 |
+
* *
|
| 65 |
+
* *
|
| 66 |
+
* *
|
| 67 |
+
*******************************************************************************/
|
| 68 |
+
|
| 69 |
+
#include "cuda_runtime_api.h"
|
| 70 |
+
|
| 71 |
+
#if defined(_NVHPC_CUDA)
|
| 72 |
+
#undef __device_builtin__
|
| 73 |
+
#define __device_builtin__ __location__(device) __location__(host)
|
| 74 |
+
#endif /* _NVHPC_CUDA */
|
| 75 |
+
|
| 76 |
+
/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA
|
| 77 |
+
* C++ compiler where the macro __CUDA_ARCH__ is not defined. */
|
| 78 |
+
#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
|
| 79 |
+
#define __DEF_IF_HOST { }
|
| 80 |
+
#else /* !__CUDA_ARCH__ */
|
| 81 |
+
#define __DEF_IF_HOST ;
|
| 82 |
+
#endif /* __CUDA_ARCH__ */
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
#if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA)
|
| 86 |
+
extern "C"
|
| 87 |
+
{
|
| 88 |
+
extern __device__ __device_builtin__ float __fAtomicAdd(float *address, float val);
|
| 89 |
+
}
|
| 90 |
+
#endif /* __CUDA_ARCH__ */
|
| 91 |
+
|
| 92 |
+
#if defined(_NVHPC_CUDA)
|
| 93 |
+
#undef __device_builtin__
|
| 94 |
+
#define __device_builtin__
|
| 95 |
+
#endif /* _NVHPC_CUDA */
|
| 96 |
+
|
| 97 |
+
/*******************************************************************************
|
| 98 |
+
* *
|
| 99 |
+
* *
|
| 100 |
+
* *
|
| 101 |
+
*******************************************************************************/
|
| 102 |
+
|
| 103 |
+
__SM_20_ATOMIC_FUNCTIONS_DECL__ float atomicAdd(float *address, float val) __DEF_IF_HOST
|
| 104 |
+
|
| 105 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 106 |
+
|
| 107 |
+
#undef __DEF_IF_HOST
|
| 108 |
+
#undef __SM_20_ATOMIC_FUNCTIONS_DECL__
|
| 109 |
+
|
| 110 |
+
#if !defined(__CUDACC_RTC__) && (defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA))
|
| 111 |
+
#include "sm_20_atomic_functions.hpp"
|
| 112 |
+
#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) */
|
| 113 |
+
|
| 114 |
+
#endif /* !__SM_20_ATOMIC_FUNCTIONS_H__ */
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__SM_30_INTRINSICS_H__)
|
| 51 |
+
#define __SM_30_INTRINSICS_H__
|
| 52 |
+
|
| 53 |
+
#if defined(__CUDACC_RTC__)
|
| 54 |
+
#define __SM_30_INTRINSICS_DECL__ __device__
|
| 55 |
+
#elif defined(_NVHPC_CUDA)
|
| 56 |
+
#define __SM_30_INTRINSICS_DECL__ extern __device__ __cudart_builtin__
|
| 57 |
+
#else /* !__CUDACC_RTC__ */
|
| 58 |
+
#define __SM_30_INTRINSICS_DECL__ static __device__ __inline__
|
| 59 |
+
#endif /* __CUDACC_RTC__ */
|
| 60 |
+
|
| 61 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 62 |
+
|
| 63 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
|
| 64 |
+
|
| 65 |
+
/*******************************************************************************
|
| 66 |
+
* *
|
| 67 |
+
* *
|
| 68 |
+
* *
|
| 69 |
+
*******************************************************************************/
|
| 70 |
+
|
| 71 |
+
#include "cuda_runtime_api.h"
|
| 72 |
+
|
| 73 |
+
/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA
|
| 74 |
+
* C++ compiler where the macro __CUDA_ARCH__ is not defined. */
|
| 75 |
+
#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
|
| 76 |
+
#define __DEF_IF_HOST { }
|
| 77 |
+
#else /* !__CUDA_ARCH__ */
|
| 78 |
+
#define __DEF_IF_HOST ;
|
| 79 |
+
#endif /* __CUDA_ARCH__ */
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
/*******************************************************************************
|
| 83 |
+
* *
|
| 84 |
+
* Below are declarations of SM-3.0 intrinsics which are included as *
|
| 85 |
+
* source (instead of being built in to the compiler) *
|
| 86 |
+
* *
|
| 87 |
+
*******************************************************************************/
|
| 88 |
+
|
| 89 |
+
#if !defined warpSize && !defined __local_warpSize
|
| 90 |
+
#define warpSize 32
|
| 91 |
+
#define __local_warpSize
|
| 92 |
+
#endif
|
| 93 |
+
|
| 94 |
+
#if defined(_WIN32)
|
| 95 |
+
# define __DEPRECATED__(msg) __declspec(deprecated(msg))
|
| 96 |
+
#elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__))))
|
| 97 |
+
# define __DEPRECATED__(msg) __attribute__((deprecated))
|
| 98 |
+
#else
|
| 99 |
+
# define __DEPRECATED__(msg) __attribute__((deprecated(msg)))
|
| 100 |
+
#endif
|
| 101 |
+
|
| 102 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
|
| 103 |
+
#define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)."
|
| 104 |
+
#elif defined(_NVHPC_CUDA)
|
| 105 |
+
#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on cc70 and above, and should be replaced with "#x"_sync()."
|
| 106 |
+
#endif
|
| 107 |
+
|
| 108 |
+
__SM_30_INTRINSICS_DECL__ unsigned __fns(unsigned mask, unsigned base, int offset) __DEF_IF_HOST
|
| 109 |
+
__SM_30_INTRINSICS_DECL__ void __barrier_sync(unsigned id) __DEF_IF_HOST
|
| 110 |
+
__SM_30_INTRINSICS_DECL__ void __barrier_sync_count(unsigned id, unsigned cnt) __DEF_IF_HOST
|
| 111 |
+
__SM_30_INTRINSICS_DECL__ void __syncwarp(unsigned mask=0xFFFFFFFF) __DEF_IF_HOST
|
| 112 |
+
__SM_30_INTRINSICS_DECL__ int __all_sync(unsigned mask, int pred) __DEF_IF_HOST
|
| 113 |
+
__SM_30_INTRINSICS_DECL__ int __any_sync(unsigned mask, int pred) __DEF_IF_HOST
|
| 114 |
+
__SM_30_INTRINSICS_DECL__ int __uni_sync(unsigned mask, int pred) __DEF_IF_HOST
|
| 115 |
+
__SM_30_INTRINSICS_DECL__ unsigned __ballot_sync(unsigned mask, int pred) __DEF_IF_HOST
|
| 116 |
+
__SM_30_INTRINSICS_DECL__ unsigned __activemask() __DEF_IF_HOST
|
| 117 |
+
|
| 118 |
+
// Warp register exchange (shuffle) intrinsics.
|
| 119 |
+
// Notes:
|
| 120 |
+
// a) Warp size is hardcoded to 32 here, because the compiler does not know
|
| 121 |
+
// the "warpSize" constant at this time
|
| 122 |
+
// b) we cannot map the float __shfl to the int __shfl because it'll mess with
|
| 123 |
+
// the register number (especially if you're doing two shfls to move a double).
|
| 124 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
|
| 125 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) int __shfl(int var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 126 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned int __shfl(unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 127 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) int __shfl_up(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 128 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned int __shfl_up(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 129 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) int __shfl_down(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 130 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned int __shfl_down(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 131 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) int __shfl_xor(int var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 132 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned int __shfl_xor(unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 133 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) float __shfl(float var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 134 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) float __shfl_up(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 135 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) float __shfl_down(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 136 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) float __shfl_xor(float var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 137 |
+
#endif
|
| 138 |
+
|
| 139 |
+
__SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 140 |
+
__SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 141 |
+
__SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 142 |
+
__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 143 |
+
__SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 144 |
+
__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 145 |
+
__SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 146 |
+
__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 147 |
+
__SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 148 |
+
__SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 149 |
+
__SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 150 |
+
__SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 151 |
+
|
| 152 |
+
// 64-bits SHFL
|
| 153 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
|
| 154 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long long __shfl(unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 155 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long long __shfl(long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 156 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long long __shfl_up(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 157 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 158 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long long __shfl_down(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 159 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 160 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long long __shfl_xor(long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 161 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 162 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) double __shfl(double var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 163 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) double __shfl_up(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 164 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) double __shfl_down(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 165 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) double __shfl_xor(double var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 166 |
+
#endif
|
| 167 |
+
|
| 168 |
+
__SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 169 |
+
__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 170 |
+
__SM_30_INTRINSICS_DECL__ long long __shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 171 |
+
__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 172 |
+
__SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 173 |
+
__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 174 |
+
__SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 175 |
+
__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 176 |
+
__SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 177 |
+
__SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 178 |
+
__SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 179 |
+
__SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 180 |
+
|
| 181 |
+
// long needs some help to choose between 32-bits and 64-bits
|
| 182 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
|
| 183 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long __shfl(long var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 184 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long __shfl(unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 185 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long __shfl_up(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 186 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long __shfl_up(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 187 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long __shfl_down(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 188 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long __shfl_down(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 189 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long __shfl_xor(long var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 190 |
+
__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long __shfl_xor(unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 191 |
+
#endif
|
| 192 |
+
|
| 193 |
+
__SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 194 |
+
__SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST
|
| 195 |
+
__SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 196 |
+
__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 197 |
+
__SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 198 |
+
__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
|
| 199 |
+
__SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 200 |
+
__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST
|
| 201 |
+
|
| 202 |
+
#undef __DEPRECATED__
|
| 203 |
+
#undef __WSB_DEPRECATION_MESSAGE
|
| 204 |
+
|
| 205 |
+
#if defined(__local_warpSize)
|
| 206 |
+
#undef warpSize
|
| 207 |
+
#undef __local_warpSize
|
| 208 |
+
#endif
|
| 209 |
+
|
| 210 |
+
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */
|
| 211 |
+
|
| 212 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 213 |
+
|
| 214 |
+
#undef __DEF_IF_HOST
|
| 215 |
+
#undef __SM_30_INTRINSICS_DECL__
|
| 216 |
+
|
| 217 |
+
#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
|
| 218 |
+
#include "sm_30_intrinsics.hpp"
|
| 219 |
+
#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
|
| 220 |
+
|
| 221 |
+
#endif /* !__SM_30_INTRINSICS_H__ */
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp
ADDED
|
@@ -0,0 +1,604 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__SM_30_INTRINSICS_HPP__)
|
| 51 |
+
#define __SM_30_INTRINSICS_HPP__
|
| 52 |
+
|
| 53 |
+
#if defined(__CUDACC_RTC__)
|
| 54 |
+
#define __SM_30_INTRINSICS_DECL__ __device__
|
| 55 |
+
#else /* !__CUDACC_RTC__ */
|
| 56 |
+
#define __SM_30_INTRINSICS_DECL__ static __device__ __inline__
|
| 57 |
+
#endif /* __CUDACC_RTC__ */
|
| 58 |
+
|
| 59 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 60 |
+
|
| 61 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
|
| 62 |
+
|
| 63 |
+
/*******************************************************************************
|
| 64 |
+
* *
|
| 65 |
+
* *
|
| 66 |
+
* *
|
| 67 |
+
*******************************************************************************/
|
| 68 |
+
|
| 69 |
+
#include "cuda_runtime_api.h"
|
| 70 |
+
|
| 71 |
+
// In here are intrinsics which are built in to the compiler. These may be
|
| 72 |
+
// referenced by intrinsic implementations from this file.
|
| 73 |
+
extern "C"
|
| 74 |
+
{
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
/*******************************************************************************
|
| 78 |
+
* *
|
| 79 |
+
* Below are implementations of SM-3.0 intrinsics which are included as *
|
| 80 |
+
* source (instead of being built in to the compiler) *
|
| 81 |
+
* *
|
| 82 |
+
*******************************************************************************/
|
| 83 |
+
|
| 84 |
+
#if !defined warpSize && !defined __local_warpSize
|
| 85 |
+
#define warpSize 32
|
| 86 |
+
#define __local_warpSize
|
| 87 |
+
#endif
|
| 88 |
+
|
| 89 |
+
// Forwards to the compiler builtin __nvvm_fns ("find n-th set"); the builtin
// is declared locally so this header needs no separate prototype header.
__SM_30_INTRINSICS_DECL__
unsigned __fns(unsigned mask, unsigned base, int offset) {
    extern __device__ __device_builtin__ unsigned int __nvvm_fns(unsigned int mask, unsigned int base, int offset);
    return __nvvm_fns(mask, base, offset);
}

// Named-barrier synchronization: forwards to the __nvvm builtin.
// (`return` of a void call is legal C++ and keeps the wrapper one statement.)
__SM_30_INTRINSICS_DECL__
void __barrier_sync(unsigned id) {
    extern __device__ __device_builtin__ void __nvvm_barrier_sync(unsigned id);
    return __nvvm_barrier_sync(id);
}

// Named-barrier synchronization with an explicit thread count.
__SM_30_INTRINSICS_DECL__
void __barrier_sync_count(unsigned id, unsigned cnt) {
    extern __device__ __device_builtin__ void __nvvm_barrier_sync_cnt(unsigned id, unsigned cnt);
    return __nvvm_barrier_sync_cnt(id, cnt);
}

// Warp-level barrier over the lanes named in `mask`.
__SM_30_INTRINSICS_DECL__
void __syncwarp(unsigned mask) {
    extern __device__ __device_builtin__ void __nvvm_bar_warp_sync(unsigned mask);
    return __nvvm_bar_warp_sync(mask);
}

// Warp vote: non-zero iff `pred` is non-zero for all lanes in `mask`.
__SM_30_INTRINSICS_DECL__
int __all_sync(unsigned mask, int pred) {
    extern __device__ __device_builtin__ int __nvvm_vote_all_sync(unsigned int mask, int pred);
    return __nvvm_vote_all_sync(mask, pred);
}

// Warp vote: non-zero iff `pred` is non-zero for any lane in `mask`.
__SM_30_INTRINSICS_DECL__
int __any_sync(unsigned mask, int pred) {
    extern __device__ __device_builtin__ int __nvvm_vote_any_sync(unsigned int mask, int pred);
    return __nvvm_vote_any_sync(mask, pred);
}

// Warp vote: non-zero iff `pred` is uniform across the lanes in `mask`.
__SM_30_INTRINSICS_DECL__
int __uni_sync(unsigned mask, int pred) {
    extern __device__ __device_builtin__ int __nvvm_vote_uni_sync(unsigned int mask, int pred);
    return __nvvm_vote_uni_sync(mask, pred);
}

// Warp vote: returns a bitmask with one bit per lane in `mask`, set where
// that lane's `pred` was non-zero.
__SM_30_INTRINSICS_DECL__
unsigned __ballot_sync(unsigned mask, int pred) {
    extern __device__ __device_builtin__ unsigned int __nvvm_vote_ballot_sync(unsigned int mask, int pred);
    return __nvvm_vote_ballot_sync(mask, pred);
}

// Returns the 32-bit mask of currently active lanes, via the PTX
// `activemask.b32` instruction (no builtin exists for this one).
__SM_30_INTRINSICS_DECL__
unsigned __activemask() {
    unsigned ret;
    asm volatile ("activemask.b32 %0;" : "=r"(ret));
    return ret;
}
|
| 143 |
+
|
| 144 |
+
// These are removed starting with compute_70 and onwards
|
| 145 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
|
| 146 |
+
|
| 147 |
+
// Legacy (pre-Volta) warp shuffle: read `var` from lane `srcLane` within a
// `width`-lane segment.  The control word packs (warpSize-width) into bits
// 8..12 and a 0x1f lane clamp into the low bits, per the PTX `shfl`
// control-operand encoding.
__SM_30_INTRINSICS_DECL__ int __shfl(int var, int srcLane, int width) {
    int ret;
    int c = ((warpSize-width) << 8) | 0x1f;
    asm volatile ("shfl.idx.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(srcLane), "r"(c));
    return ret;
}

// Unsigned variant: a pure bit-pattern reinterpretation around the int form.
__SM_30_INTRINSICS_DECL__ unsigned int __shfl(unsigned int var, int srcLane, int width) {
    return (unsigned int) __shfl((int)var, srcLane, width);
}

// Shuffle-up: read from the lane `delta` below the caller.  Note the control
// word omits the 0x1f clamp (shfl.up clamps at the segment's low boundary).
__SM_30_INTRINSICS_DECL__ int __shfl_up(int var, unsigned int delta, int width) {
    int ret;
    int c = (warpSize-width) << 8;
    asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c));
    return ret;
}

__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up(unsigned int var, unsigned int delta, int width) {
    return (unsigned int) __shfl_up((int)var, delta, width);
}

// Shuffle-down: read from the lane `delta` above the caller.
__SM_30_INTRINSICS_DECL__ int __shfl_down(int var, unsigned int delta, int width) {
    int ret;
    int c = ((warpSize-width) << 8) | 0x1f;
    asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c));
    return ret;
}

__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down(unsigned int var, unsigned int delta, int width) {
    return (unsigned int) __shfl_down((int)var, delta, width);
}

// Butterfly shuffle: read from lane (caller-lane XOR laneMask).
__SM_30_INTRINSICS_DECL__ int __shfl_xor(int var, int laneMask, int width) {
    int ret;
    int c = ((warpSize-width) << 8) | 0x1f;
    asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(laneMask), "r"(c));
    return ret;
}

__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor(unsigned int var, int laneMask, int width) {
    return (unsigned int) __shfl_xor((int)var, laneMask, width);
}
|
| 190 |
+
|
| 191 |
+
// Float shuffles use "f" register constraints directly rather than routing
// through the int overload — going through int would force an extra register
// move (see the note above this family in the original header about not
// mapping float __shfl onto int __shfl).
__SM_30_INTRINSICS_DECL__ float __shfl(float var, int srcLane, int width) {
    float ret;
    int c;
    c = ((warpSize-width) << 8) | 0x1f;
    asm volatile ("shfl.idx.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(srcLane), "r"(c));
    return ret;
}

// Shuffle-up for float: control word omits the 0x1f clamp, as in the int form.
__SM_30_INTRINSICS_DECL__ float __shfl_up(float var, unsigned int delta, int width) {
    float ret;
    int c;
    c = (warpSize-width) << 8;
    asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c));
    return ret;
}

__SM_30_INTRINSICS_DECL__ float __shfl_down(float var, unsigned int delta, int width) {
    float ret;
    int c;
    c = ((warpSize-width) << 8) | 0x1f;
    asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c));
    return ret;
}

__SM_30_INTRINSICS_DECL__ float __shfl_xor(float var, int laneMask, int width) {
    float ret;
    int c;
    c = ((warpSize-width) << 8) | 0x1f;
    asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(laneMask), "r"(c));
    return ret;
}
|
| 222 |
+
|
| 223 |
+
// 64-bits SHFL
|
| 224 |
+
|
| 225 |
+
// 64-bit shuffles: split the value into two 32-bit halves with mov.b64,
// shuffle each half independently, then repack.  Both halves use the same
// source lane, so the recombined value is coherent.
__SM_30_INTRINSICS_DECL__ long long __shfl(long long var, int srcLane, int width) {
    int lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
    hi = __shfl(hi, srcLane, width);
    lo = __shfl(lo, srcLane, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
    return var;
}

// Unsigned variant: bit-pattern reinterpretation around the signed form.
__SM_30_INTRINSICS_DECL__ unsigned long long __shfl(unsigned long long var, int srcLane, int width) {
    return (unsigned long long) __shfl((long long) var, srcLane, width);
}

__SM_30_INTRINSICS_DECL__ long long __shfl_up(long long var, unsigned int delta, int width) {
    int lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
    hi = __shfl_up(hi, delta, width);
    lo = __shfl_up(lo, delta, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width) {
    return (unsigned long long) __shfl_up((long long) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ long long __shfl_down(long long var, unsigned int delta, int width) {
    int lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
    hi = __shfl_down(hi, delta, width);
    lo = __shfl_down(lo, delta, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width) {
    return (unsigned long long) __shfl_down((long long) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ long long __shfl_xor(long long var, int laneMask, int width) {
    int lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
    hi = __shfl_xor(hi, laneMask, width);
    lo = __shfl_xor(lo, laneMask, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width) {
    return (unsigned long long) __shfl_xor((long long) var, laneMask, width);
}
|
| 276 |
+
|
| 277 |
+
// Double shuffles: same split/shuffle/repack scheme as the long long
// overloads, but with a "d" (double-register) constraint on the mov.b64
// and unsigned 32-bit halves (which resolve to the unsigned int __shfl).
__SM_30_INTRINSICS_DECL__ double __shfl(double var, int srcLane, int width) {
    unsigned lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
    hi = __shfl(hi, srcLane, width);
    lo = __shfl(lo, srcLane, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ double __shfl_up(double var, unsigned int delta, int width) {
    unsigned lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
    hi = __shfl_up(hi, delta, width);
    lo = __shfl_up(lo, delta, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ double __shfl_down(double var, unsigned int delta, int width) {
    unsigned lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
    hi = __shfl_down(hi, delta, width);
    lo = __shfl_down(lo, delta, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ double __shfl_xor(double var, int laneMask, int width) {
    unsigned lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
    hi = __shfl_xor(hi, laneMask, width);
    lo = __shfl_xor(lo, laneMask, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
    return var;
}
|
| 312 |
+
|
| 313 |
+
// `long` is 64-bit on LP64 targets and 32-bit on others, so these overloads
// dispatch at compile time to the matching fixed-width shuffle.  The sizeof
// comparison is a constant expression; the dead branch is eliminated.
__SM_30_INTRINSICS_DECL__ long __shfl(long var, int srcLane, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl((long long) var, srcLane, width);
    }
    return __shfl((int) var, srcLane, width);
}

__SM_30_INTRINSICS_DECL__ unsigned long __shfl(unsigned long var, int srcLane, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl((unsigned long long) var, srcLane, width);
    }
    return __shfl((unsigned int) var, srcLane, width);
}

__SM_30_INTRINSICS_DECL__ long __shfl_up(long var, unsigned int delta, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_up((long long) var, delta, width);
    }
    return __shfl_up((int) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up(unsigned long var, unsigned int delta, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_up((unsigned long long) var, delta, width);
    }
    return __shfl_up((unsigned int) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ long __shfl_down(long var, unsigned int delta, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_down((long long) var, delta, width);
    }
    return __shfl_down((int) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down(unsigned long var, unsigned int delta, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_down((unsigned long long) var, delta, width);
    }
    return __shfl_down((unsigned int) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ long __shfl_xor(long var, int laneMask, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_xor((long long) var, laneMask, width);
    }
    return __shfl_xor((int) var, laneMask, width);
}

__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor(unsigned long var, int laneMask, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_xor((unsigned long long) var, laneMask, width);
    }
    return __shfl_xor((unsigned int) var, laneMask, width);
}
|
| 360 |
+
|
| 361 |
+
#endif /* defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */
|
| 362 |
+
|
| 363 |
+
// Warp register exchange (shuffle) intrinsics.
|
| 364 |
+
// Notes:
|
| 365 |
+
// a) Warp size is hardcoded to 32 here, because the compiler does not know
|
| 366 |
+
// the "warpSize" constant at this time
|
| 367 |
+
// b) we cannot map the float __shfl to the int __shfl because it'll mess with
|
| 368 |
+
// the register number (especially if you're doing two shfls to move a double).
|
| 369 |
+
// Synchronizing warp shuffles (required on Volta+): forward to the __nvvm
// builtins rather than raw PTX.  The control word `c` uses the same PTX
// `shfl` encoding as the legacy forms: (warpSize-width) in bits 8..12 plus
// a 0x1f lane clamp for idx/down/bfly.
__SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width) {
    extern __device__ __device_builtin__ unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
    int ret;
    int c = ((warpSize-width) << 8) | 0x1f;
    ret = __nvvm_shfl_idx_sync(mask, var, srcLane, c);
    return ret;
}

// Unsigned variant: bit-pattern reinterpretation around the int form.
__SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width) {
    return (unsigned int) __shfl_sync(mask, (int)var, srcLane, width);
}

// Shuffle-up: control word omits the 0x1f clamp, as in the legacy form.
__SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width) {
    extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
    int ret;
    int c = (warpSize-width) << 8;
    ret = __nvvm_shfl_up_sync(mask, var, delta, c);
    return ret;
}

__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned mask, unsigned int var, unsigned int delta, int width) {
    return (unsigned int) __shfl_up_sync(mask, (int)var, delta, width);
}

__SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width) {
    extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
    int ret;
    int c = ((warpSize-width) << 8) | 0x1f;
    ret = __nvvm_shfl_down_sync(mask, var, delta, c);
    return ret;
}

__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width) {
    return (unsigned int) __shfl_down_sync(mask, (int)var, delta, width);
}

// Butterfly shuffle: read from lane (caller-lane XOR laneMask).
__SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width) {
    extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
    int ret;
    int c = ((warpSize-width) << 8) | 0x1f;
    ret = __nvvm_shfl_bfly_sync(mask, var, laneMask, c);
    return ret;
}

__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width) {
    return (unsigned int) __shfl_xor_sync(mask, (int)var, laneMask, width);
}
|
| 416 |
+
|
| 417 |
+
// Float *_sync shuffles: reinterpret the float's bits as int with
// __float_as_int, shuffle through the integer builtin, and reinterpret back
// with __int_as_float — a pure bit move, so NaN payloads survive intact.
__SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width) {
    extern __device__ __device_builtin__ unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
    int ret;
    int c;
    c = ((warpSize-width) << 8) | 0x1f;
    ret = __nvvm_shfl_idx_sync(mask, __float_as_int(var), srcLane, c);
    return __int_as_float(ret);
}

// Shuffle-up: control word omits the 0x1f clamp, as in the integer form.
__SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width) {
    extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
    int ret;
    int c;
    c = (warpSize-width) << 8;
    ret = __nvvm_shfl_up_sync(mask, __float_as_int(var), delta, c);
    return __int_as_float(ret);
}

__SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width) {
    extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
    int ret;
    int c;
    c = ((warpSize-width) << 8) | 0x1f;
    ret = __nvvm_shfl_down_sync(mask, __float_as_int(var), delta, c);
    return __int_as_float(ret);
}

__SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width) {
    extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
    int ret;
    int c;
    c = ((warpSize-width) << 8) | 0x1f;
    ret = __nvvm_shfl_bfly_sync(mask, __float_as_int(var), laneMask, c);
    return __int_as_float(ret);
}
|
| 452 |
+
|
| 453 |
+
// 64-bits SHFL
|
| 454 |
+
// 64-bit *_sync shuffles: split into two 32-bit halves via mov.b64, shuffle
// each half through the 32-bit *_sync overload, then repack.
__SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width) {
    int lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
    hi = __shfl_sync(mask, hi, srcLane, width);
    lo = __shfl_sync(mask, lo, srcLane, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
    return var;
}

// Unsigned variant: bit-pattern reinterpretation around the signed form.
__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width) {
    return (unsigned long long) __shfl_sync(mask, (long long) var, srcLane, width);
}

__SM_30_INTRINSICS_DECL__ long long __shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width) {
    int lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
    hi = __shfl_up_sync(mask, hi, delta, width);
    lo = __shfl_up_sync(mask, lo, delta, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) {
    return (unsigned long long) __shfl_up_sync(mask, (long long) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width) {
    int lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
    hi = __shfl_down_sync(mask, hi, delta, width);
    lo = __shfl_down_sync(mask, lo, delta, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) {
    return (unsigned long long) __shfl_down_sync(mask, (long long) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width) {
    int lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
    hi = __shfl_xor_sync(mask, hi, laneMask, width);
    lo = __shfl_xor_sync(mask, lo, laneMask, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width) {
    return (unsigned long long) __shfl_xor_sync(mask, (long long) var, laneMask, width);
}
|
| 505 |
+
|
| 506 |
+
// Double *_sync shuffles: split/shuffle/repack as in the long long forms,
// with a "d" (double-register) constraint on the mov.b64 and unsigned
// halves (which resolve to the unsigned int *_sync overloads).
__SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width) {
    unsigned lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
    hi = __shfl_sync(mask, hi, srcLane, width);
    lo = __shfl_sync(mask, lo, srcLane, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width) {
    unsigned lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
    hi = __shfl_up_sync(mask, hi, delta, width);
    lo = __shfl_up_sync(mask, lo, delta, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width) {
    unsigned lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
    hi = __shfl_down_sync(mask, hi, delta, width);
    lo = __shfl_down_sync(mask, lo, delta, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
    return var;
}

__SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width) {
    unsigned lo, hi;
    asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
    hi = __shfl_xor_sync(mask, hi, laneMask, width);
    lo = __shfl_xor_sync(mask, lo, laneMask, width);
    asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
    return var;
}
|
| 541 |
+
|
| 542 |
+
// long needs some help to choose between 32-bits and 64-bits
|
| 543 |
+
|
| 544 |
+
// `long` needs help choosing between the 32-bit and 64-bit *_sync shuffles:
// its width is platform-dependent, so dispatch on a compile-time sizeof
// comparison (the untaken branch is dead code the compiler removes).
__SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_sync(mask, (long long) var, srcLane, width);
    }
    return __shfl_sync(mask, (int) var, srcLane, width);
}

__SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_sync(mask, (unsigned long long) var, srcLane, width);
    }
    return __shfl_sync(mask, (unsigned int) var, srcLane, width);
}

__SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_up_sync(mask, (long long) var, delta, width);
    }
    return __shfl_up_sync(mask, (int) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_up_sync(mask, (unsigned long long) var, delta, width);
    }
    return __shfl_up_sync(mask, (unsigned int) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_down_sync(mask, (long long) var, delta, width);
    }
    return __shfl_down_sync(mask, (int) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned int delta, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_down_sync(mask, (unsigned long long) var, delta, width);
    }
    return __shfl_down_sync(mask, (unsigned int) var, delta, width);
}

__SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_xor_sync(mask, (long long) var, laneMask, width);
    }
    return __shfl_xor_sync(mask, (int) var, laneMask, width);
}

__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width) {
    if (sizeof(long) == sizeof(long long)) {
        return __shfl_xor_sync(mask, (unsigned long long) var, laneMask, width);
    }
    return __shfl_xor_sync(mask, (unsigned int) var, laneMask, width);
}
|
| 591 |
+
|
| 592 |
+
#if defined(__local_warpSize)
|
| 593 |
+
#undef warpSize
|
| 594 |
+
#undef __local_warpSize
|
| 595 |
+
#endif
|
| 596 |
+
|
| 597 |
+
#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */
|
| 598 |
+
|
| 599 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 600 |
+
|
| 601 |
+
#undef __SM_30_INTRINSICS_DECL__
|
| 602 |
+
|
| 603 |
+
#endif /* !__SM_30_INTRINSICS_HPP__ */
|
| 604 |
+
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.h
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 35.235 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.35.235 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__SM_32_ATOMIC_FUNCTIONS_H__)
|
| 51 |
+
#define __SM_32_ATOMIC_FUNCTIONS_H__
|
| 52 |
+
|
| 53 |
+
#if defined(__CUDACC_RTC__)
|
| 54 |
+
#define __SM_32_ATOMIC_FUNCTIONS_DECL__ __device__
|
| 55 |
+
#else /* !__CUDACC_RTC__ */
|
| 56 |
+
#define __SM_32_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
|
| 57 |
+
#endif /* __CUDACC_RTC__ */
|
| 58 |
+
|
| 59 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 60 |
+
|
| 61 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
|
| 62 |
+
|
| 63 |
+
/*******************************************************************************
|
| 64 |
+
* *
|
| 65 |
+
* *
|
| 66 |
+
* *
|
| 67 |
+
*******************************************************************************/
|
| 68 |
+
|
| 69 |
+
#include "cuda_runtime_api.h"
|
| 70 |
+
|
| 71 |
+
#if defined(_NVHPC_CUDA)
|
| 72 |
+
#undef __device_builtin__
|
| 73 |
+
#define __device_builtin__ __location__(device) __location__(host)
|
| 74 |
+
#endif /* _NVHPC_CUDA */
|
| 75 |
+
|
| 76 |
+
#ifndef __CUDA_ARCH__
|
| 77 |
+
#define __DEF_IF_HOST { }
|
| 78 |
+
#else /* !__CUDA_ARCH__ */
|
| 79 |
+
#define __DEF_IF_HOST ;
|
| 80 |
+
#endif /* __CUDA_ARCH__ */
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
#ifdef __CUDA_ARCH__
|
| 84 |
+
extern "C"
|
| 85 |
+
{
|
| 86 |
+
extern __device__ __device_builtin__ long long __illAtomicMin(long long *address, long long val);
|
| 87 |
+
extern __device__ __device_builtin__ long long __illAtomicMax(long long *address, long long val);
|
| 88 |
+
extern __device__ __device_builtin__ long long __llAtomicAnd(long long *address, long long val);
|
| 89 |
+
extern __device__ __device_builtin__ long long __llAtomicOr(long long *address, long long val);
|
| 90 |
+
extern __device__ __device_builtin__ long long __llAtomicXor(long long *address, long long val);
|
| 91 |
+
extern __device__ __device_builtin__ unsigned long long __ullAtomicMin(unsigned long long *address, unsigned long long val);
|
| 92 |
+
extern __device__ __device_builtin__ unsigned long long __ullAtomicMax(unsigned long long *address, unsigned long long val);
|
| 93 |
+
extern __device__ __device_builtin__ unsigned long long __ullAtomicAnd(unsigned long long *address, unsigned long long val);
|
| 94 |
+
extern __device__ __device_builtin__ unsigned long long __ullAtomicOr (unsigned long long *address, unsigned long long val);
|
| 95 |
+
extern __device__ __device_builtin__ unsigned long long __ullAtomicXor(unsigned long long *address, unsigned long long val);
|
| 96 |
+
}
|
| 97 |
+
#endif /* __CUDA_ARCH__ */
|
| 98 |
+
|
| 99 |
+
#if defined(_NVHPC_CUDA)
|
| 100 |
+
#undef __device_builtin__
|
| 101 |
+
#define __device_builtin__
|
| 102 |
+
#endif /* _NVHPC_CUDA */
|
| 103 |
+
|
| 104 |
+
/*******************************************************************************
|
| 105 |
+
* *
|
| 106 |
+
* *
|
| 107 |
+
* *
|
| 108 |
+
*******************************************************************************/
|
| 109 |
+
|
| 110 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMin(long long *address, long long val) __DEF_IF_HOST
|
| 111 |
+
|
| 112 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMax(long long *address, long long val) __DEF_IF_HOST
|
| 113 |
+
|
| 114 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicAnd(long long *address, long long val) __DEF_IF_HOST
|
| 115 |
+
|
| 116 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicOr(long long *address, long long val) __DEF_IF_HOST
|
| 117 |
+
|
| 118 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicXor(long long *address, long long val) __DEF_IF_HOST
|
| 119 |
+
|
| 120 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMin(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 121 |
+
|
| 122 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMax(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 123 |
+
|
| 124 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicAnd(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 125 |
+
|
| 126 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicOr(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 127 |
+
|
| 128 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicXor(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 129 |
+
|
| 130 |
+
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */
|
| 131 |
+
|
| 132 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 133 |
+
|
| 134 |
+
#undef __DEF_IF_HOST
|
| 135 |
+
#undef __SM_32_ATOMIC_FUNCTIONS_DECL__
|
| 136 |
+
|
| 137 |
+
#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
|
| 138 |
+
#include "sm_32_atomic_functions.hpp"
|
| 139 |
+
#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
|
| 140 |
+
|
| 141 |
+
#endif /* !__SM_32_ATOMIC_FUNCTIONS_H__ */
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 35.235 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.35.235 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__SM_32_ATOMIC_FUNCTIONS_HPP__)
|
| 51 |
+
#define __SM_32_ATOMIC_FUNCTIONS_HPP__
|
| 52 |
+
|
| 53 |
+
#if defined(__CUDACC_RTC__)
|
| 54 |
+
#define __SM_32_ATOMIC_FUNCTIONS_DECL__ __device__
|
| 55 |
+
#else /* !__CUDACC_RTC__ */
|
| 56 |
+
#define __SM_32_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
|
| 57 |
+
#endif /* __CUDACC_RTC__ */
|
| 58 |
+
|
| 59 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 60 |
+
|
| 61 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
|
| 62 |
+
|
| 63 |
+
/*******************************************************************************
|
| 64 |
+
* *
|
| 65 |
+
* *
|
| 66 |
+
* *
|
| 67 |
+
*******************************************************************************/
|
| 68 |
+
|
| 69 |
+
#include "cuda_runtime_api.h"
|
| 70 |
+
|
| 71 |
+
/*******************************************************************************
|
| 72 |
+
* *
|
| 73 |
+
* *
|
| 74 |
+
* *
|
| 75 |
+
*******************************************************************************/
|
| 76 |
+
|
| 77 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMin(long long *address, long long val)
|
| 78 |
+
{
|
| 79 |
+
return __illAtomicMin(address, val);
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMax(long long *address, long long val)
|
| 83 |
+
{
|
| 84 |
+
return __illAtomicMax(address, val);
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicAnd(long long *address, long long val)
|
| 88 |
+
{
|
| 89 |
+
return __llAtomicAnd(address, val);
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicOr(long long *address, long long val)
|
| 93 |
+
{
|
| 94 |
+
return __llAtomicOr(address, val);
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicXor(long long *address, long long val)
|
| 98 |
+
{
|
| 99 |
+
return __llAtomicXor(address, val);
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMin(unsigned long long *address, unsigned long long val)
|
| 103 |
+
{
|
| 104 |
+
return __ullAtomicMin(address, val);
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMax(unsigned long long *address, unsigned long long val)
|
| 108 |
+
{
|
| 109 |
+
return __ullAtomicMax(address, val);
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicAnd(unsigned long long *address, unsigned long long val)
|
| 113 |
+
{
|
| 114 |
+
return __ullAtomicAnd(address, val);
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicOr(unsigned long long *address, unsigned long long val)
|
| 118 |
+
{
|
| 119 |
+
return __ullAtomicOr(address, val);
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
__SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicXor(unsigned long long *address, unsigned long long val)
|
| 123 |
+
{
|
| 124 |
+
return __ullAtomicXor(address, val);
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */
|
| 128 |
+
|
| 129 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 130 |
+
|
| 131 |
+
#undef __SM_32_ATOMIC_FUNCTIONS_DECL__
|
| 132 |
+
|
| 133 |
+
#endif /* !__SM_32_ATOMIC_FUNCTIONS_HPP__ */
|
| 134 |
+
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp
ADDED
|
@@ -0,0 +1,588 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__SM_32_INTRINSICS_HPP__)
|
| 51 |
+
#define __SM_32_INTRINSICS_HPP__
|
| 52 |
+
|
| 53 |
+
#if defined(__CUDACC_RTC__)
|
| 54 |
+
#define __SM_32_INTRINSICS_DECL__ __device__
|
| 55 |
+
#else /* !__CUDACC_RTC__ */
|
| 56 |
+
#define __SM_32_INTRINSICS_DECL__ static __device__ __inline__
|
| 57 |
+
#endif /* __CUDACC_RTC__ */
|
| 58 |
+
|
| 59 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 60 |
+
|
| 61 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
|
| 62 |
+
|
| 63 |
+
/*******************************************************************************
|
| 64 |
+
* *
|
| 65 |
+
* *
|
| 66 |
+
* *
|
| 67 |
+
*******************************************************************************/
|
| 68 |
+
|
| 69 |
+
#include "cuda_runtime_api.h"
|
| 70 |
+
|
| 71 |
+
// In here are intrinsics which are built in to the compiler. These may be
|
| 72 |
+
// referenced by intrinsic implementations from this file.
|
| 73 |
+
extern "C"
|
| 74 |
+
{
|
| 75 |
+
// There are no intrinsics built in to the compiler for SM-3.5,
|
| 76 |
+
// all intrinsics are now implemented as inline PTX below.
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
/*******************************************************************************
|
| 80 |
+
* *
|
| 81 |
+
* Below are implementations of SM-3.5 intrinsics which are included as *
|
| 82 |
+
* source (instead of being built in to the compiler) *
|
| 83 |
+
* *
|
| 84 |
+
*******************************************************************************/
|
| 85 |
+
|
| 86 |
+
// LDG is a "load from global via texture path" command which can exhibit higher
|
| 87 |
+
// bandwidth on GK110 than a regular LD.
|
| 88 |
+
// Define a different pointer storage size for 64 and 32 bit
|
| 89 |
+
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
|
| 90 |
+
#define __LDG_PTR "l"
|
| 91 |
+
#else
|
| 92 |
+
#define __LDG_PTR "r"
|
| 93 |
+
#endif
|
| 94 |
+
|
| 95 |
+
/******************************************************************************
|
| 96 |
+
* __ldg *
|
| 97 |
+
******************************************************************************/
|
| 98 |
+
|
| 99 |
+
// Size of long is architecture and OS specific.
|
| 100 |
+
#if defined(__LP64__) // 64 bits
|
| 101 |
+
__SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
|
| 102 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 103 |
+
#else // 32 bits
|
| 104 |
+
__SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
|
| 105 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 106 |
+
#endif
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
__SM_32_INTRINSICS_DECL__ char __ldg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
|
| 110 |
+
__SM_32_INTRINSICS_DECL__ signed char __ldg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
|
| 111 |
+
__SM_32_INTRINSICS_DECL__ short __ldg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
|
| 112 |
+
__SM_32_INTRINSICS_DECL__ int __ldg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
|
| 113 |
+
__SM_32_INTRINSICS_DECL__ long long __ldg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
|
| 114 |
+
__SM_32_INTRINSICS_DECL__ char2 __ldg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.nc.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
|
| 115 |
+
__SM_32_INTRINSICS_DECL__ char4 __ldg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.nc.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
|
| 116 |
+
__SM_32_INTRINSICS_DECL__ short2 __ldg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.nc.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 117 |
+
__SM_32_INTRINSICS_DECL__ short4 __ldg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.nc.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 118 |
+
__SM_32_INTRINSICS_DECL__ int2 __ldg(const int2 *ptr) { int2 ret; asm volatile ("ld.global.nc.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 119 |
+
__SM_32_INTRINSICS_DECL__ int4 __ldg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.nc.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 120 |
+
__SM_32_INTRINSICS_DECL__ longlong2 __ldg(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.nc.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 121 |
+
|
| 122 |
+
__SM_32_INTRINSICS_DECL__ unsigned char __ldg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
|
| 123 |
+
__SM_32_INTRINSICS_DECL__ unsigned short __ldg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 124 |
+
__SM_32_INTRINSICS_DECL__ unsigned int __ldg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 125 |
+
__SM_32_INTRINSICS_DECL__ unsigned long long __ldg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 126 |
+
__SM_32_INTRINSICS_DECL__ uchar2 __ldg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.nc.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
|
| 127 |
+
__SM_32_INTRINSICS_DECL__ uchar4 __ldg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.nc.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
|
| 128 |
+
__SM_32_INTRINSICS_DECL__ ushort2 __ldg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.nc.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 129 |
+
__SM_32_INTRINSICS_DECL__ ushort4 __ldg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.nc.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 130 |
+
__SM_32_INTRINSICS_DECL__ uint2 __ldg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.nc.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 131 |
+
__SM_32_INTRINSICS_DECL__ uint4 __ldg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 132 |
+
__SM_32_INTRINSICS_DECL__ ulonglong2 __ldg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 133 |
+
|
| 134 |
+
__SM_32_INTRINSICS_DECL__ float __ldg(const float *ptr) { float ret; asm volatile ("ld.global.nc.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 135 |
+
__SM_32_INTRINSICS_DECL__ double __ldg(const double *ptr) { double ret; asm volatile ("ld.global.nc.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 136 |
+
__SM_32_INTRINSICS_DECL__ float2 __ldg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.nc.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 137 |
+
__SM_32_INTRINSICS_DECL__ float4 __ldg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.nc.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 138 |
+
__SM_32_INTRINSICS_DECL__ double2 __ldg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.nc.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
/******************************************************************************
|
| 142 |
+
* __ldcg *
|
| 143 |
+
******************************************************************************/
|
| 144 |
+
|
| 145 |
+
// Size of long is architecture and OS specific.
|
| 146 |
+
#if defined(__LP64__) // 64 bits
|
| 147 |
+
__SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
|
| 148 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 149 |
+
#else // 32 bits
|
| 150 |
+
__SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
|
| 151 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 152 |
+
#endif
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
__SM_32_INTRINSICS_DECL__ char __ldcg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
|
| 156 |
+
__SM_32_INTRINSICS_DECL__ signed char __ldcg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
|
| 157 |
+
__SM_32_INTRINSICS_DECL__ short __ldcg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
|
| 158 |
+
__SM_32_INTRINSICS_DECL__ int __ldcg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
|
| 159 |
+
__SM_32_INTRINSICS_DECL__ long long __ldcg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
|
| 160 |
+
__SM_32_INTRINSICS_DECL__ char2 __ldcg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cg.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
|
| 161 |
+
__SM_32_INTRINSICS_DECL__ char4 __ldcg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cg.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
|
| 162 |
+
__SM_32_INTRINSICS_DECL__ short2 __ldcg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cg.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 163 |
+
__SM_32_INTRINSICS_DECL__ short4 __ldcg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cg.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 164 |
+
__SM_32_INTRINSICS_DECL__ int2 __ldcg(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cg.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 165 |
+
__SM_32_INTRINSICS_DECL__ int4 __ldcg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cg.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 166 |
+
__SM_32_INTRINSICS_DECL__ longlong2 __ldcg(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.cg.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 167 |
+
|
| 168 |
+
__SM_32_INTRINSICS_DECL__ unsigned char __ldcg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
|
| 169 |
+
__SM_32_INTRINSICS_DECL__ unsigned short __ldcg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 170 |
+
__SM_32_INTRINSICS_DECL__ unsigned int __ldcg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 171 |
+
__SM_32_INTRINSICS_DECL__ unsigned long long __ldcg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 172 |
+
__SM_32_INTRINSICS_DECL__ uchar2 __ldcg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cg.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
|
| 173 |
+
__SM_32_INTRINSICS_DECL__ uchar4 __ldcg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cg.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
|
| 174 |
+
__SM_32_INTRINSICS_DECL__ ushort2 __ldcg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cg.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 175 |
+
__SM_32_INTRINSICS_DECL__ ushort4 __ldcg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cg.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 176 |
+
__SM_32_INTRINSICS_DECL__ uint2 __ldcg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cg.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 177 |
+
__SM_32_INTRINSICS_DECL__ uint4 __ldcg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cg.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 178 |
+
__SM_32_INTRINSICS_DECL__ ulonglong2 __ldcg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cg.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 179 |
+
|
| 180 |
+
__SM_32_INTRINSICS_DECL__ float __ldcg(const float *ptr) { float ret; asm volatile ("ld.global.cg.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 181 |
+
__SM_32_INTRINSICS_DECL__ double __ldcg(const double *ptr) { double ret; asm volatile ("ld.global.cg.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 182 |
+
__SM_32_INTRINSICS_DECL__ float2 __ldcg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cg.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 183 |
+
__SM_32_INTRINSICS_DECL__ float4 __ldcg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cg.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 184 |
+
__SM_32_INTRINSICS_DECL__ double2 __ldcg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cg.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 185 |
+
|
| 186 |
+
/******************************************************************************
|
| 187 |
+
* __ldca *
|
| 188 |
+
******************************************************************************/
|
| 189 |
+
|
| 190 |
+
// Size of long is architecture and OS specific.
|
| 191 |
+
#if defined(__LP64__) // 64 bits
|
| 192 |
+
__SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
|
| 193 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 194 |
+
#else // 32 bits
|
| 195 |
+
__SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
|
| 196 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 197 |
+
#endif
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
__SM_32_INTRINSICS_DECL__ char __ldca(const char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
|
| 201 |
+
__SM_32_INTRINSICS_DECL__ signed char __ldca(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
|
| 202 |
+
__SM_32_INTRINSICS_DECL__ short __ldca(const short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
|
| 203 |
+
__SM_32_INTRINSICS_DECL__ int __ldca(const int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
|
| 204 |
+
__SM_32_INTRINSICS_DECL__ long long __ldca(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
|
| 205 |
+
__SM_32_INTRINSICS_DECL__ char2 __ldca(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.ca.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
|
| 206 |
+
__SM_32_INTRINSICS_DECL__ char4 __ldca(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.ca.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
|
| 207 |
+
__SM_32_INTRINSICS_DECL__ short2 __ldca(const short2 *ptr) { short2 ret; asm volatile ("ld.global.ca.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 208 |
+
__SM_32_INTRINSICS_DECL__ short4 __ldca(const short4 *ptr) { short4 ret; asm volatile ("ld.global.ca.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 209 |
+
__SM_32_INTRINSICS_DECL__ int2 __ldca(const int2 *ptr) { int2 ret; asm volatile ("ld.global.ca.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 210 |
+
__SM_32_INTRINSICS_DECL__ int4 __ldca(const int4 *ptr) { int4 ret; asm volatile ("ld.global.ca.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 211 |
+
__SM_32_INTRINSICS_DECL__ longlong2 __ldca(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.ca.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 212 |
+
|
| 213 |
+
__SM_32_INTRINSICS_DECL__ unsigned char __ldca(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
|
| 214 |
+
__SM_32_INTRINSICS_DECL__ unsigned short __ldca(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 215 |
+
__SM_32_INTRINSICS_DECL__ unsigned int __ldca(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 216 |
+
__SM_32_INTRINSICS_DECL__ unsigned long long __ldca(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 217 |
+
__SM_32_INTRINSICS_DECL__ uchar2 __ldca(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.ca.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
|
| 218 |
+
__SM_32_INTRINSICS_DECL__ uchar4 __ldca(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.ca.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
|
| 219 |
+
__SM_32_INTRINSICS_DECL__ ushort2 __ldca(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.ca.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 220 |
+
__SM_32_INTRINSICS_DECL__ ushort4 __ldca(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.ca.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 221 |
+
__SM_32_INTRINSICS_DECL__ uint2 __ldca(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.ca.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 222 |
+
__SM_32_INTRINSICS_DECL__ uint4 __ldca(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.ca.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 223 |
+
__SM_32_INTRINSICS_DECL__ ulonglong2 __ldca(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.ca.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 224 |
+
|
| 225 |
+
__SM_32_INTRINSICS_DECL__ float __ldca(const float *ptr) { float ret; asm volatile ("ld.global.ca.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 226 |
+
__SM_32_INTRINSICS_DECL__ double __ldca(const double *ptr) { double ret; asm volatile ("ld.global.ca.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 227 |
+
__SM_32_INTRINSICS_DECL__ float2 __ldca(const float2 *ptr) { float2 ret; asm volatile ("ld.global.ca.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 228 |
+
__SM_32_INTRINSICS_DECL__ float4 __ldca(const float4 *ptr) { float4 ret; asm volatile ("ld.global.ca.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 229 |
+
__SM_32_INTRINSICS_DECL__ double2 __ldca(const double2 *ptr) { double2 ret; asm volatile ("ld.global.ca.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 230 |
+
|
| 231 |
+
/******************************************************************************
|
| 232 |
+
* __ldcs *
|
| 233 |
+
******************************************************************************/
|
| 234 |
+
|
| 235 |
+
// Size of long is architecture and OS specific.
|
| 236 |
+
#if defined(__LP64__) // 64 bits
|
| 237 |
+
__SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
|
| 238 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 239 |
+
#else // 32 bits
|
| 240 |
+
__SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
|
| 241 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 242 |
+
#endif
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
__SM_32_INTRINSICS_DECL__ char __ldcs(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
|
| 246 |
+
__SM_32_INTRINSICS_DECL__ signed char __ldcs(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
|
| 247 |
+
__SM_32_INTRINSICS_DECL__ short __ldcs(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
|
| 248 |
+
__SM_32_INTRINSICS_DECL__ int __ldcs(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
|
| 249 |
+
__SM_32_INTRINSICS_DECL__ long long __ldcs(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
|
| 250 |
+
__SM_32_INTRINSICS_DECL__ char2 __ldcs(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cs.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
|
| 251 |
+
__SM_32_INTRINSICS_DECL__ char4 __ldcs(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cs.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
|
| 252 |
+
__SM_32_INTRINSICS_DECL__ short2 __ldcs(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cs.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 253 |
+
__SM_32_INTRINSICS_DECL__ short4 __ldcs(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cs.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 254 |
+
__SM_32_INTRINSICS_DECL__ int2 __ldcs(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cs.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 255 |
+
__SM_32_INTRINSICS_DECL__ int4 __ldcs(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cs.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 256 |
+
__SM_32_INTRINSICS_DECL__ longlong2 __ldcs(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.cs.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 257 |
+
|
| 258 |
+
__SM_32_INTRINSICS_DECL__ unsigned char __ldcs(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
|
| 259 |
+
__SM_32_INTRINSICS_DECL__ unsigned short __ldcs(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 260 |
+
__SM_32_INTRINSICS_DECL__ unsigned int __ldcs(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 261 |
+
__SM_32_INTRINSICS_DECL__ unsigned long long __ldcs(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 262 |
+
__SM_32_INTRINSICS_DECL__ uchar2 __ldcs(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cs.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
|
| 263 |
+
__SM_32_INTRINSICS_DECL__ uchar4 __ldcs(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cs.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
|
| 264 |
+
__SM_32_INTRINSICS_DECL__ ushort2 __ldcs(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cs.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 265 |
+
__SM_32_INTRINSICS_DECL__ ushort4 __ldcs(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cs.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 266 |
+
__SM_32_INTRINSICS_DECL__ uint2 __ldcs(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cs.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 267 |
+
__SM_32_INTRINSICS_DECL__ uint4 __ldcs(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cs.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 268 |
+
__SM_32_INTRINSICS_DECL__ ulonglong2 __ldcs(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cs.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 269 |
+
|
| 270 |
+
__SM_32_INTRINSICS_DECL__ float __ldcs(const float *ptr) { float ret; asm volatile ("ld.global.cs.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 271 |
+
__SM_32_INTRINSICS_DECL__ double __ldcs(const double *ptr) { double ret; asm volatile ("ld.global.cs.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
|
| 272 |
+
__SM_32_INTRINSICS_DECL__ float2 __ldcs(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cs.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 273 |
+
__SM_32_INTRINSICS_DECL__ float4 __ldcs(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cs.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
|
| 274 |
+
__SM_32_INTRINSICS_DECL__ double2 __ldcs(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cs.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
|
| 275 |
+
|
| 276 |
+
/******************************************************************************
|
| 277 |
+
* __ldlu *
|
| 278 |
+
******************************************************************************/
|
| 279 |
+
|
| 280 |
+
// Size of long is architecture and OS specific.
|
| 281 |
+
#if defined(__LP64__) // 64 bits
|
| 282 |
+
__SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
|
| 283 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 284 |
+
#else // 32 bits
|
| 285 |
+
__SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
|
| 286 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 287 |
+
#endif
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
__SM_32_INTRINSICS_DECL__ char __ldlu(const char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; }
|
| 291 |
+
__SM_32_INTRINSICS_DECL__ signed char __ldlu(const signed char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; }
|
| 292 |
+
__SM_32_INTRINSICS_DECL__ short __ldlu(const short *ptr) { unsigned short ret; asm ("ld.global.lu.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; }
|
| 293 |
+
__SM_32_INTRINSICS_DECL__ int __ldlu(const int *ptr) { unsigned int ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; }
|
| 294 |
+
__SM_32_INTRINSICS_DECL__ long long __ldlu(const long long *ptr) { unsigned long long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; }
|
| 295 |
+
__SM_32_INTRINSICS_DECL__ char2 __ldlu(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.lu.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
|
| 296 |
+
__SM_32_INTRINSICS_DECL__ char4 __ldlu(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.lu.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
|
| 297 |
+
__SM_32_INTRINSICS_DECL__ short2 __ldlu(const short2 *ptr) { short2 ret; asm ("ld.global.lu.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 298 |
+
__SM_32_INTRINSICS_DECL__ short4 __ldlu(const short4 *ptr) { short4 ret; asm ("ld.global.lu.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 299 |
+
__SM_32_INTRINSICS_DECL__ int2 __ldlu(const int2 *ptr) { int2 ret; asm ("ld.global.lu.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 300 |
+
__SM_32_INTRINSICS_DECL__ int4 __ldlu(const int4 *ptr) { int4 ret; asm ("ld.global.lu.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 301 |
+
__SM_32_INTRINSICS_DECL__ longlong2 __ldlu(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.lu.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 302 |
+
|
| 303 |
+
__SM_32_INTRINSICS_DECL__ unsigned char __ldlu(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.lu.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; }
|
| 304 |
+
__SM_32_INTRINSICS_DECL__ unsigned short __ldlu(const unsigned short *ptr) { unsigned short ret; asm ("ld.global.lu.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 305 |
+
__SM_32_INTRINSICS_DECL__ unsigned int __ldlu(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 306 |
+
__SM_32_INTRINSICS_DECL__ unsigned long long __ldlu(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 307 |
+
__SM_32_INTRINSICS_DECL__ uchar2 __ldlu(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.lu.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
|
| 308 |
+
__SM_32_INTRINSICS_DECL__ uchar4 __ldlu(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.lu.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
|
| 309 |
+
__SM_32_INTRINSICS_DECL__ ushort2 __ldlu(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.lu.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 310 |
+
__SM_32_INTRINSICS_DECL__ ushort4 __ldlu(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.lu.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 311 |
+
__SM_32_INTRINSICS_DECL__ uint2 __ldlu(const uint2 *ptr) { uint2 ret; asm ("ld.global.lu.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 312 |
+
__SM_32_INTRINSICS_DECL__ uint4 __ldlu(const uint4 *ptr) { uint4 ret; asm ("ld.global.lu.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 313 |
+
__SM_32_INTRINSICS_DECL__ ulonglong2 __ldlu(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.lu.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 314 |
+
|
| 315 |
+
__SM_32_INTRINSICS_DECL__ float __ldlu(const float *ptr) { float ret; asm ("ld.global.lu.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 316 |
+
__SM_32_INTRINSICS_DECL__ double __ldlu(const double *ptr) { double ret; asm ("ld.global.lu.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 317 |
+
__SM_32_INTRINSICS_DECL__ float2 __ldlu(const float2 *ptr) { float2 ret; asm ("ld.global.lu.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 318 |
+
__SM_32_INTRINSICS_DECL__ float4 __ldlu(const float4 *ptr) { float4 ret; asm ("ld.global.lu.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 319 |
+
__SM_32_INTRINSICS_DECL__ double2 __ldlu(const double2 *ptr) { double2 ret; asm ("ld.global.lu.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 320 |
+
|
| 321 |
+
/******************************************************************************
|
| 322 |
+
* __ldcv *
|
| 323 |
+
******************************************************************************/
|
| 324 |
+
|
| 325 |
+
// Size of long is architecture and OS specific.
|
| 326 |
+
#if defined(__LP64__) // 64 bits
|
| 327 |
+
__SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
|
| 328 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 329 |
+
#else // 32 bits
|
| 330 |
+
__SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
|
| 331 |
+
__SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 332 |
+
#endif
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
__SM_32_INTRINSICS_DECL__ char __ldcv(const char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; }
|
| 336 |
+
__SM_32_INTRINSICS_DECL__ signed char __ldcv(const signed char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; }
|
| 337 |
+
__SM_32_INTRINSICS_DECL__ short __ldcv(const short *ptr) { unsigned short ret; asm ("ld.global.cv.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; }
|
| 338 |
+
__SM_32_INTRINSICS_DECL__ int __ldcv(const int *ptr) { unsigned int ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; }
|
| 339 |
+
__SM_32_INTRINSICS_DECL__ long long __ldcv(const long long *ptr) { unsigned long long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; }
|
| 340 |
+
__SM_32_INTRINSICS_DECL__ char2 __ldcv(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.cv.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
|
| 341 |
+
__SM_32_INTRINSICS_DECL__ char4 __ldcv(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.cv.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
|
| 342 |
+
__SM_32_INTRINSICS_DECL__ short2 __ldcv(const short2 *ptr) { short2 ret; asm ("ld.global.cv.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 343 |
+
__SM_32_INTRINSICS_DECL__ short4 __ldcv(const short4 *ptr) { short4 ret; asm ("ld.global.cv.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 344 |
+
__SM_32_INTRINSICS_DECL__ int2 __ldcv(const int2 *ptr) { int2 ret; asm ("ld.global.cv.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 345 |
+
__SM_32_INTRINSICS_DECL__ int4 __ldcv(const int4 *ptr) { int4 ret; asm ("ld.global.cv.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 346 |
+
__SM_32_INTRINSICS_DECL__ longlong2 __ldcv(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.cv.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 347 |
+
|
| 348 |
+
__SM_32_INTRINSICS_DECL__ unsigned char __ldcv(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.cv.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; }
|
| 349 |
+
__SM_32_INTRINSICS_DECL__ unsigned short __ldcv(const unsigned short *ptr) { unsigned short ret; asm ("ld.global.cv.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 350 |
+
__SM_32_INTRINSICS_DECL__ unsigned int __ldcv(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 351 |
+
__SM_32_INTRINSICS_DECL__ unsigned long long __ldcv(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 352 |
+
__SM_32_INTRINSICS_DECL__ uchar2 __ldcv(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.cv.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
|
| 353 |
+
__SM_32_INTRINSICS_DECL__ uchar4 __ldcv(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.cv.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
|
| 354 |
+
__SM_32_INTRINSICS_DECL__ ushort2 __ldcv(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.cv.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 355 |
+
__SM_32_INTRINSICS_DECL__ ushort4 __ldcv(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.cv.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 356 |
+
__SM_32_INTRINSICS_DECL__ uint2 __ldcv(const uint2 *ptr) { uint2 ret; asm ("ld.global.cv.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 357 |
+
__SM_32_INTRINSICS_DECL__ uint4 __ldcv(const uint4 *ptr) { uint4 ret; asm ("ld.global.cv.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 358 |
+
__SM_32_INTRINSICS_DECL__ ulonglong2 __ldcv(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.cv.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 359 |
+
|
| 360 |
+
__SM_32_INTRINSICS_DECL__ float __ldcv(const float *ptr) { float ret; asm ("ld.global.cv.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 361 |
+
__SM_32_INTRINSICS_DECL__ double __ldcv(const double *ptr) { double ret; asm ("ld.global.cv.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 362 |
+
__SM_32_INTRINSICS_DECL__ float2 __ldcv(const float2 *ptr) { float2 ret; asm ("ld.global.cv.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 363 |
+
__SM_32_INTRINSICS_DECL__ float4 __ldcv(const float4 *ptr) { float4 ret; asm ("ld.global.cv.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 364 |
+
__SM_32_INTRINSICS_DECL__ double2 __ldcv(const double2 *ptr) { double2 ret; asm ("ld.global.cv.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
|
| 365 |
+
|
| 366 |
+
/******************************************************************************
|
| 367 |
+
* __stwb *
|
| 368 |
+
******************************************************************************/
|
| 369 |
+
|
| 370 |
+
// Size of long is architecture and OS specific.
|
| 371 |
+
#if defined(__LP64__) // 64 bits
|
| 372 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 373 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 374 |
+
#else // 32 bits
|
| 375 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 376 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 377 |
+
#endif
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(char *ptr, char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 381 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(signed char *ptr, signed char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 382 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(short *ptr, short value) { asm ("st.global.wb.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
|
| 383 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(int *ptr, int value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 384 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(long long *ptr, long long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 385 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wb.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
|
| 386 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
|
| 387 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(short2 *ptr, short2 value) { asm ("st.global.wb.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
|
| 388 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(short4 *ptr, short4 value) { asm ("st.global.wb.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
|
| 389 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(int2 *ptr, int2 value) { asm ("st.global.wb.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
|
| 390 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(int4 *ptr, int4 value) { asm ("st.global.wb.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
|
| 391 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(longlong2 *ptr, longlong2 value) { asm ("st.global.wb.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
|
| 392 |
+
|
| 393 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(unsigned char *ptr, unsigned char value) { asm ("st.global.wb.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 394 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(unsigned short *ptr, unsigned short value) { asm ("st.global.wb.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
|
| 395 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(unsigned int *ptr, unsigned int value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 396 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 397 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.wb.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
|
| 398 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
|
| 399 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(ushort2 *ptr, ushort2 value) { asm ("st.global.wb.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
|
| 400 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(ushort4 *ptr, ushort4 value) { asm ("st.global.wb.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
|
| 401 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(uint2 *ptr, uint2 value) { asm ("st.global.wb.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
|
| 402 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(uint4 *ptr, uint4 value) { asm ("st.global.wb.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
|
| 403 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wb.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
|
| 404 |
+
|
| 405 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(float *ptr, float value) { asm ("st.global.wb.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
|
| 406 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(double *ptr, double value) { asm ("st.global.wb.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
|
| 407 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(float2 *ptr, float2 value) { asm ("st.global.wb.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
|
| 408 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(float4 *ptr, float4 value) { asm ("st.global.wb.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
|
| 409 |
+
__SM_32_INTRINSICS_DECL__ void __stwb(double2 *ptr, double2 value) { asm ("st.global.wb.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
|
| 410 |
+
|
| 411 |
+
/******************************************************************************
|
| 412 |
+
* __stcg *
|
| 413 |
+
******************************************************************************/
|
| 414 |
+
|
| 415 |
+
// Size of long is architecture and OS specific.
|
| 416 |
+
#if defined(__LP64__) // 64 bits
|
| 417 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 418 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 419 |
+
#else // 32 bits
|
| 420 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 421 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 422 |
+
#endif
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(char *ptr, char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 426 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(signed char *ptr, signed char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 427 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(short *ptr, short value) { asm ("st.global.cg.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
|
| 428 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(int *ptr, int value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 429 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(long long *ptr, long long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 430 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cg.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
|
| 431 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
|
| 432 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(short2 *ptr, short2 value) { asm ("st.global.cg.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
|
| 433 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(short4 *ptr, short4 value) { asm ("st.global.cg.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
|
| 434 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(int2 *ptr, int2 value) { asm ("st.global.cg.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
|
| 435 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(int4 *ptr, int4 value) { asm ("st.global.cg.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
|
| 436 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(longlong2 *ptr, longlong2 value) { asm ("st.global.cg.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
|
| 437 |
+
|
| 438 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(unsigned char *ptr, unsigned char value) { asm ("st.global.cg.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 439 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(unsigned short *ptr, unsigned short value) { asm ("st.global.cg.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
|
| 440 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(unsigned int *ptr, unsigned int value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 441 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 442 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.cg.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
|
| 443 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
|
| 444 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(ushort2 *ptr, ushort2 value) { asm ("st.global.cg.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
|
| 445 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(ushort4 *ptr, ushort4 value) { asm ("st.global.cg.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
|
| 446 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(uint2 *ptr, uint2 value) { asm ("st.global.cg.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
|
| 447 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(uint4 *ptr, uint4 value) { asm ("st.global.cg.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
|
| 448 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cg.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
|
| 449 |
+
|
| 450 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(float *ptr, float value) { asm ("st.global.cg.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
|
| 451 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(double *ptr, double value) { asm ("st.global.cg.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
|
| 452 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(float2 *ptr, float2 value) { asm ("st.global.cg.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
|
| 453 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(float4 *ptr, float4 value) { asm ("st.global.cg.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
|
| 454 |
+
__SM_32_INTRINSICS_DECL__ void __stcg(double2 *ptr, double2 value) { asm ("st.global.cg.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
|
| 455 |
+
|
| 456 |
+
/******************************************************************************
|
| 457 |
+
* __stcs *
|
| 458 |
+
******************************************************************************/
|
| 459 |
+
|
| 460 |
+
// Size of long is architecture and OS specific.
|
| 461 |
+
#if defined(__LP64__) // 64 bits
|
| 462 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 463 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 464 |
+
#else // 32 bits
|
| 465 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 466 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 467 |
+
#endif
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(char *ptr, char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 471 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(signed char *ptr, signed char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 472 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(short *ptr, short value) { asm ("st.global.cs.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
|
| 473 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(int *ptr, int value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 474 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(long long *ptr, long long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 475 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cs.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
|
| 476 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
|
| 477 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(short2 *ptr, short2 value) { asm ("st.global.cs.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
|
| 478 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(short4 *ptr, short4 value) { asm ("st.global.cs.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
|
| 479 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(int2 *ptr, int2 value) { asm ("st.global.cs.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
|
| 480 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(int4 *ptr, int4 value) { asm ("st.global.cs.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
|
| 481 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(longlong2 *ptr, longlong2 value) { asm ("st.global.cs.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
|
| 482 |
+
|
| 483 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(unsigned char *ptr, unsigned char value) { asm ("st.global.cs.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 484 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(unsigned short *ptr, unsigned short value) { asm ("st.global.cs.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
|
| 485 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(unsigned int *ptr, unsigned int value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 486 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 487 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.cs.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
|
| 488 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
|
| 489 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(ushort2 *ptr, ushort2 value) { asm ("st.global.cs.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
|
| 490 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(ushort4 *ptr, ushort4 value) { asm ("st.global.cs.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
|
| 491 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(uint2 *ptr, uint2 value) { asm ("st.global.cs.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
|
| 492 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(uint4 *ptr, uint4 value) { asm ("st.global.cs.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
|
| 493 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cs.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
|
| 494 |
+
|
| 495 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(float *ptr, float value) { asm ("st.global.cs.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
|
| 496 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(double *ptr, double value) { asm ("st.global.cs.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
|
| 497 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(float2 *ptr, float2 value) { asm ("st.global.cs.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
|
| 498 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(float4 *ptr, float4 value) { asm ("st.global.cs.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
|
| 499 |
+
__SM_32_INTRINSICS_DECL__ void __stcs(double2 *ptr, double2 value) { asm ("st.global.cs.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
|
| 500 |
+
|
| 501 |
+
/******************************************************************************
|
| 502 |
+
* __stwt *
|
| 503 |
+
******************************************************************************/
|
| 504 |
+
|
| 505 |
+
// Size of long is architecture and OS specific.
|
| 506 |
+
#if defined(__LP64__) // 64 bits
|
| 507 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 508 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 509 |
+
#else // 32 bits
|
| 510 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 511 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 512 |
+
#endif
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(char *ptr, char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 516 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(signed char *ptr, signed char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 517 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(short *ptr, short value) { asm ("st.global.wt.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
|
| 518 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(int *ptr, int value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 519 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(long long *ptr, long long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 520 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wt.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
|
| 521 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
|
| 522 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(short2 *ptr, short2 value) { asm ("st.global.wt.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
|
| 523 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(short4 *ptr, short4 value) { asm ("st.global.wt.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
|
| 524 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(int2 *ptr, int2 value) { asm ("st.global.wt.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
|
| 525 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(int4 *ptr, int4 value) { asm ("st.global.wt.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
|
| 526 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(longlong2 *ptr, longlong2 value) { asm ("st.global.wt.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
|
| 527 |
+
|
| 528 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(unsigned char *ptr, unsigned char value) { asm ("st.global.wt.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
|
| 529 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(unsigned short *ptr, unsigned short value) { asm ("st.global.wt.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
|
| 530 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(unsigned int *ptr, unsigned int value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
|
| 531 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
|
| 532 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.wt.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
|
| 533 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
|
| 534 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(ushort2 *ptr, ushort2 value) { asm ("st.global.wt.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
|
| 535 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(ushort4 *ptr, ushort4 value) { asm ("st.global.wt.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
|
| 536 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(uint2 *ptr, uint2 value) { asm ("st.global.wt.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
|
| 537 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(uint4 *ptr, uint4 value) { asm ("st.global.wt.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
|
| 538 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wt.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
|
| 539 |
+
|
| 540 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(float *ptr, float value) { asm ("st.global.wt.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
|
| 541 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(double *ptr, double value) { asm ("st.global.wt.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
|
| 542 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(float2 *ptr, float2 value) { asm ("st.global.wt.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
|
| 543 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(float4 *ptr, float4 value) { asm ("st.global.wt.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
|
| 544 |
+
__SM_32_INTRINSICS_DECL__ void __stwt(double2 *ptr, double2 value) { asm ("st.global.wt.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
|
| 545 |
+
|
| 546 |
+
#undef __LDG_PTR
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
// SHF is the "funnel shift" operation - an accelerated left/right shift with carry
|
| 550 |
+
// operating on 64-bit quantities, which are concatenations of two 32-bit registers.
|
| 551 |
+
|
| 552 |
+
// This shifts [b:a] left by "shift" bits, returning the most significant bits of the result.
|
| 553 |
+
__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_l(unsigned int lo, unsigned int hi, unsigned int shift)
|
| 554 |
+
{
|
| 555 |
+
unsigned int ret;
|
| 556 |
+
asm volatile ("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
|
| 557 |
+
return ret;
|
| 558 |
+
}
|
| 559 |
+
__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_lc(unsigned int lo, unsigned int hi, unsigned int shift)
|
| 560 |
+
{
|
| 561 |
+
unsigned int ret;
|
| 562 |
+
asm volatile ("shf.l.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
|
| 563 |
+
return ret;
|
| 564 |
+
}
|
| 565 |
+
|
| 566 |
+
// This shifts [b:a] right by "shift" bits, returning the least significant bits of the result.
|
| 567 |
+
__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_r(unsigned int lo, unsigned int hi, unsigned int shift)
|
| 568 |
+
{
|
| 569 |
+
unsigned int ret;
|
| 570 |
+
asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
|
| 571 |
+
return ret;
|
| 572 |
+
}
|
| 573 |
+
__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_rc(unsigned int lo, unsigned int hi, unsigned int shift)
|
| 574 |
+
{
|
| 575 |
+
unsigned int ret;
|
| 576 |
+
asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
|
| 577 |
+
return ret;
|
| 578 |
+
}
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */
|
| 582 |
+
|
| 583 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 584 |
+
|
| 585 |
+
#undef __SM_32_INTRINSICS_DECL__
|
| 586 |
+
|
| 587 |
+
#endif /* !__SM_32_INTRINSICS_HPP__ */
|
| 588 |
+
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h
ADDED
|
@@ -0,0 +1,543 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__SM_60_ATOMIC_FUNCTIONS_H__)
|
| 51 |
+
#define __SM_60_ATOMIC_FUNCTIONS_H__
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
#if defined(__CUDACC_RTC__)
|
| 55 |
+
#define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__
|
| 56 |
+
#elif defined(_NVHPC_CUDA)
|
| 57 |
+
#define __SM_60_ATOMIC_FUNCTIONS_DECL__ extern __device__ __cudart_builtin__
|
| 58 |
+
#else /* __CUDACC_RTC__ */
|
| 59 |
+
#define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
|
| 60 |
+
#endif /* __CUDACC_RTC__ */
|
| 61 |
+
|
| 62 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 63 |
+
|
| 64 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
|
| 65 |
+
|
| 66 |
+
/*******************************************************************************
|
| 67 |
+
* *
|
| 68 |
+
* *
|
| 69 |
+
* *
|
| 70 |
+
*******************************************************************************/
|
| 71 |
+
|
| 72 |
+
#include "cuda_runtime_api.h"
|
| 73 |
+
|
| 74 |
+
/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA
|
| 75 |
+
* C++ compiler where the macro __CUDA_ARCH__ is not defined. */
|
| 76 |
+
#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
|
| 77 |
+
#define __DEF_IF_HOST { }
|
| 78 |
+
#else /* !__CUDA_ARCH__ */
|
| 79 |
+
#define __DEF_IF_HOST ;
|
| 80 |
+
#endif /* __CUDA_ARCH__ */
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
#ifdef __CUDA_ARCH__
|
| 85 |
+
extern "C"
|
| 86 |
+
{
|
| 87 |
+
extern __device__ __device_builtin__ double __dAtomicAdd(double *address, double val);
|
| 88 |
+
|
| 89 |
+
extern __device__ __device_builtin__
|
| 90 |
+
int __iAtomicAdd_block(int *address, int val);
|
| 91 |
+
|
| 92 |
+
extern __device__ __device_builtin__
|
| 93 |
+
int __iAtomicAdd_system(int *address, int val);
|
| 94 |
+
|
| 95 |
+
extern __device__ __device_builtin__
|
| 96 |
+
unsigned int __uAtomicAdd_block(unsigned int *address, unsigned int val);
|
| 97 |
+
|
| 98 |
+
extern __device__ __device_builtin__
|
| 99 |
+
unsigned int __uAtomicAdd_system(unsigned int *address, unsigned int val);
|
| 100 |
+
|
| 101 |
+
extern __device__ __device_builtin__
|
| 102 |
+
unsigned long long __ullAtomicAdd_block(unsigned long long *address, unsigned long long val);
|
| 103 |
+
|
| 104 |
+
extern __device__ __device_builtin__
|
| 105 |
+
unsigned long long __ullAtomicAdd_system(unsigned long long *address, unsigned long long val);
|
| 106 |
+
|
| 107 |
+
extern __device__ __device_builtin__
|
| 108 |
+
float __fAtomicAdd_block(float *address, float val);
|
| 109 |
+
|
| 110 |
+
extern __device__ __device_builtin__
|
| 111 |
+
float __fAtomicAdd_system(float *address, float val);
|
| 112 |
+
|
| 113 |
+
extern __device__ __device_builtin__
|
| 114 |
+
double __dAtomicAdd_block(double *address, double val);
|
| 115 |
+
|
| 116 |
+
extern __device__ __device_builtin__
|
| 117 |
+
double __dAtomicAdd_system(double *address, double val);
|
| 118 |
+
|
| 119 |
+
extern __device__ __device_builtin__
|
| 120 |
+
int __iAtomicExch_block(int *address, int val);
|
| 121 |
+
|
| 122 |
+
extern __device__ __device_builtin__
|
| 123 |
+
int __iAtomicExch_system(int *address, int val);
|
| 124 |
+
|
| 125 |
+
extern __device__ __device_builtin__
|
| 126 |
+
unsigned int __uAtomicExch_block(unsigned int *address, unsigned int val);
|
| 127 |
+
|
| 128 |
+
extern __device__ __device_builtin__
|
| 129 |
+
unsigned int __uAtomicExch_system(unsigned int *address, unsigned int val);
|
| 130 |
+
|
| 131 |
+
extern __device__ __device_builtin__
|
| 132 |
+
unsigned long long __ullAtomicExch_block(unsigned long long *address, unsigned long long val);
|
| 133 |
+
|
| 134 |
+
extern __device__ __device_builtin__
|
| 135 |
+
unsigned long long __ullAtomicExch_system(unsigned long long *address, unsigned long long val);
|
| 136 |
+
|
| 137 |
+
extern __device__ __device_builtin__
|
| 138 |
+
float __fAtomicExch_block(float *address, float val);
|
| 139 |
+
|
| 140 |
+
extern __device__ __device_builtin__
|
| 141 |
+
float __fAtomicExch_system(float *address, float val);
|
| 142 |
+
|
| 143 |
+
extern __device__ __device_builtin__
|
| 144 |
+
int __iAtomicMin_block(int *address, int val);
|
| 145 |
+
|
| 146 |
+
extern __device__ __device_builtin__
|
| 147 |
+
int __iAtomicMin_system(int *address, int val);
|
| 148 |
+
|
| 149 |
+
extern __device__ __device_builtin__
|
| 150 |
+
long long __illAtomicMin_block(long long *address, long long val);
|
| 151 |
+
|
| 152 |
+
extern __device__ __device_builtin__
|
| 153 |
+
long long __illAtomicMin_system(long long *address, long long val);
|
| 154 |
+
|
| 155 |
+
extern __device__ __device_builtin__
|
| 156 |
+
unsigned int __uAtomicMin_block(unsigned int *address, unsigned int val);
|
| 157 |
+
|
| 158 |
+
extern __device__ __device_builtin__
|
| 159 |
+
unsigned int __uAtomicMin_system(unsigned int *address, unsigned int val);
|
| 160 |
+
|
| 161 |
+
extern __device__ __device_builtin__
|
| 162 |
+
unsigned long long __ullAtomicMin_block(unsigned long long *address, unsigned long long val);
|
| 163 |
+
|
| 164 |
+
extern __device__ __device_builtin__
|
| 165 |
+
unsigned long long __ullAtomicMin_system(unsigned long long *address, unsigned long long val);
|
| 166 |
+
|
| 167 |
+
extern __device__ __device_builtin__
|
| 168 |
+
int __iAtomicMax_block(int *address, int val);
|
| 169 |
+
|
| 170 |
+
extern __device__ __device_builtin__
|
| 171 |
+
int __iAtomicMax_system(int *address, int val);
|
| 172 |
+
|
| 173 |
+
extern __device__ __device_builtin__
|
| 174 |
+
long long __illAtomicMax_block(long long *address, long long val);
|
| 175 |
+
|
| 176 |
+
extern __device__ __device_builtin__
|
| 177 |
+
long long __illAtomicMax_system(long long *address, long long val);
|
| 178 |
+
|
| 179 |
+
extern __device__ __device_builtin__
|
| 180 |
+
unsigned int __uAtomicMax_block(unsigned int *address, unsigned int val);
|
| 181 |
+
|
| 182 |
+
extern __device__ __device_builtin__
|
| 183 |
+
unsigned int __uAtomicMax_system(unsigned int *address, unsigned int val);
|
| 184 |
+
|
| 185 |
+
extern __device__ __device_builtin__
|
| 186 |
+
unsigned long long __ullAtomicMax_block(unsigned long long *address, unsigned long long val);
|
| 187 |
+
|
| 188 |
+
extern __device__ __device_builtin__
|
| 189 |
+
unsigned long long __ullAtomicMax_system(unsigned long long *address, unsigned long long val);
|
| 190 |
+
|
| 191 |
+
extern __device__ __device_builtin__
|
| 192 |
+
unsigned int __uAtomicInc_block(unsigned int *address, unsigned int val);
|
| 193 |
+
|
| 194 |
+
extern __device__ __device_builtin__
|
| 195 |
+
unsigned int __uAtomicInc_system(unsigned int *address, unsigned int val);
|
| 196 |
+
|
| 197 |
+
extern __device__ __device_builtin__
|
| 198 |
+
unsigned int __uAtomicDec_block(unsigned int *address, unsigned int val);
|
| 199 |
+
|
| 200 |
+
extern __device__ __device_builtin__
|
| 201 |
+
unsigned int __uAtomicDec_system(unsigned int *address, unsigned int val);
|
| 202 |
+
|
| 203 |
+
extern __device__ __device_builtin__
|
| 204 |
+
int __iAtomicCAS_block(int *address, int compare, int val);
|
| 205 |
+
|
| 206 |
+
extern __device__ __device_builtin__
|
| 207 |
+
int __iAtomicCAS_system(int *address, int compare, int val);
|
| 208 |
+
|
| 209 |
+
extern __device__ __device_builtin__
|
| 210 |
+
unsigned int __uAtomicCAS_block(unsigned int *address, unsigned int compare,
|
| 211 |
+
unsigned int val);
|
| 212 |
+
|
| 213 |
+
extern __device__ __device_builtin__
|
| 214 |
+
unsigned int __uAtomicCAS_system(unsigned int *address, unsigned int compare,
|
| 215 |
+
unsigned int val);
|
| 216 |
+
|
| 217 |
+
extern __device__ __device_builtin__
|
| 218 |
+
unsigned long long __ullAtomicCAS_block(unsigned long long int *address,
|
| 219 |
+
unsigned long long int compare,
|
| 220 |
+
unsigned long long int val);
|
| 221 |
+
|
| 222 |
+
extern __device__ __device_builtin__
|
| 223 |
+
unsigned long long __ullAtomicCAS_system(unsigned long long int *address,
|
| 224 |
+
unsigned long long int compare,
|
| 225 |
+
unsigned long long int val);
|
| 226 |
+
|
| 227 |
+
extern __device__ __device_builtin__
|
| 228 |
+
int __iAtomicAnd_block(int *address, int val);
|
| 229 |
+
|
| 230 |
+
extern __device__ __device_builtin__
|
| 231 |
+
int __iAtomicAnd_system(int *address, int val);
|
| 232 |
+
|
| 233 |
+
extern __device__ __device_builtin__
|
| 234 |
+
long long __llAtomicAnd_block(long long *address, long long val);
|
| 235 |
+
|
| 236 |
+
extern __device__ __device_builtin__
|
| 237 |
+
long long __llAtomicAnd_system(long long *address, long long val);
|
| 238 |
+
|
| 239 |
+
extern __device__ __device_builtin__
|
| 240 |
+
unsigned int __uAtomicAnd_block(unsigned int *address, unsigned int val);
|
| 241 |
+
|
| 242 |
+
extern __device__ __device_builtin__
|
| 243 |
+
unsigned int __uAtomicAnd_system(unsigned int *address, unsigned int val);
|
| 244 |
+
|
| 245 |
+
extern __device__ __device_builtin__
|
| 246 |
+
unsigned long long __ullAtomicAnd_block(unsigned long long *address, unsigned long long val);
|
| 247 |
+
|
| 248 |
+
extern __device__ __device_builtin__
|
| 249 |
+
unsigned long long __ullAtomicAnd_system(unsigned long long *address, unsigned long long val);
|
| 250 |
+
|
| 251 |
+
extern __device__ __device_builtin__
|
| 252 |
+
int __iAtomicOr_block(int *address, int val);
|
| 253 |
+
|
| 254 |
+
extern __device__ __device_builtin__
|
| 255 |
+
int __iAtomicOr_system(int *address, int val);
|
| 256 |
+
|
| 257 |
+
extern __device__ __device_builtin__
|
| 258 |
+
long long __llAtomicOr_block(long long *address, long long val);
|
| 259 |
+
|
| 260 |
+
extern __device__ __device_builtin__
|
| 261 |
+
long long __llAtomicOr_system(long long *address, long long val);
|
| 262 |
+
|
| 263 |
+
extern __device__ __device_builtin__
|
| 264 |
+
unsigned int __uAtomicOr_block(unsigned int *address, unsigned int val);
|
| 265 |
+
|
| 266 |
+
extern __device__ __device_builtin__
|
| 267 |
+
unsigned int __uAtomicOr_system(unsigned int *address, unsigned int val);
|
| 268 |
+
|
| 269 |
+
extern __device__ __device_builtin__
|
| 270 |
+
unsigned long long __ullAtomicOr_block(unsigned long long *address, unsigned long long val);
|
| 271 |
+
|
| 272 |
+
extern __device__ __device_builtin__
|
| 273 |
+
unsigned long long __ullAtomicOr_system(unsigned long long *address, unsigned long long val);
|
| 274 |
+
|
| 275 |
+
extern __device__ __device_builtin__
|
| 276 |
+
int __iAtomicXor_block(int *address, int val);
|
| 277 |
+
|
| 278 |
+
extern __device__ __device_builtin__
|
| 279 |
+
int __iAtomicXor_system(int *address, int val);
|
| 280 |
+
|
| 281 |
+
extern __device__ __device_builtin__
|
| 282 |
+
long long __llAtomicXor_block(long long *address, long long val);
|
| 283 |
+
|
| 284 |
+
extern __device__ __device_builtin__
|
| 285 |
+
long long __llAtomicXor_system(long long *address, long long val);
|
| 286 |
+
|
| 287 |
+
extern __device__ __device_builtin__
|
| 288 |
+
unsigned int __uAtomicXor_block(unsigned int *address, unsigned int val);
|
| 289 |
+
|
| 290 |
+
extern __device__ __device_builtin__
|
| 291 |
+
unsigned int __uAtomicXor_system(unsigned int *address, unsigned int val);
|
| 292 |
+
|
| 293 |
+
extern __device__ __device_builtin__
|
| 294 |
+
unsigned long long __ullAtomicXor_block(unsigned long long *address, unsigned long long val);
|
| 295 |
+
|
| 296 |
+
extern __device__ __device_builtin__
|
| 297 |
+
unsigned long long __ullAtomicXor_system(unsigned long long *address, unsigned long long val);
|
| 298 |
+
}
|
| 299 |
+
#endif /* __CUDA_ARCH__ */
|
| 300 |
+
|
| 301 |
+
/*******************************************************************************
|
| 302 |
+
* *
|
| 303 |
+
* *
|
| 304 |
+
* *
|
| 305 |
+
*******************************************************************************/
|
| 306 |
+
|
| 307 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val) __DEF_IF_HOST
|
| 308 |
+
|
| 309 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 310 |
+
int atomicAdd_block(int *address, int val) __DEF_IF_HOST
|
| 311 |
+
|
| 312 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 313 |
+
int atomicAdd_system(int *address, int val) __DEF_IF_HOST
|
| 314 |
+
|
| 315 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 316 |
+
unsigned int atomicAdd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 317 |
+
|
| 318 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 319 |
+
unsigned int atomicAdd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 320 |
+
|
| 321 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 322 |
+
unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 323 |
+
|
| 324 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 325 |
+
unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 326 |
+
|
| 327 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 328 |
+
float atomicAdd_block(float *address, float val) __DEF_IF_HOST
|
| 329 |
+
|
| 330 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 331 |
+
float atomicAdd_system(float *address, float val) __DEF_IF_HOST
|
| 332 |
+
|
| 333 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 334 |
+
double atomicAdd_block(double *address, double val) __DEF_IF_HOST
|
| 335 |
+
|
| 336 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 337 |
+
double atomicAdd_system(double *address, double val) __DEF_IF_HOST
|
| 338 |
+
|
| 339 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 340 |
+
int atomicSub_block(int *address, int val) __DEF_IF_HOST
|
| 341 |
+
|
| 342 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 343 |
+
int atomicSub_system(int *address, int val) __DEF_IF_HOST
|
| 344 |
+
|
| 345 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 346 |
+
unsigned int atomicSub_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 347 |
+
|
| 348 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 349 |
+
unsigned int atomicSub_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 350 |
+
|
| 351 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 352 |
+
int atomicExch_block(int *address, int val) __DEF_IF_HOST
|
| 353 |
+
|
| 354 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 355 |
+
int atomicExch_system(int *address, int val) __DEF_IF_HOST
|
| 356 |
+
|
| 357 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 358 |
+
unsigned int atomicExch_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 359 |
+
|
| 360 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 361 |
+
unsigned int atomicExch_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 362 |
+
|
| 363 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 364 |
+
unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 365 |
+
|
| 366 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 367 |
+
unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 368 |
+
|
| 369 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 370 |
+
float atomicExch_block(float *address, float val) __DEF_IF_HOST
|
| 371 |
+
|
| 372 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 373 |
+
float atomicExch_system(float *address, float val) __DEF_IF_HOST
|
| 374 |
+
|
| 375 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 376 |
+
int atomicMin_block(int *address, int val) __DEF_IF_HOST
|
| 377 |
+
|
| 378 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 379 |
+
int atomicMin_system(int *address, int val) __DEF_IF_HOST
|
| 380 |
+
|
| 381 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 382 |
+
long long atomicMin_block(long long *address, long long val) __DEF_IF_HOST
|
| 383 |
+
|
| 384 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 385 |
+
long long atomicMin_system(long long *address, long long val) __DEF_IF_HOST
|
| 386 |
+
|
| 387 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 388 |
+
unsigned int atomicMin_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 389 |
+
|
| 390 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 391 |
+
unsigned int atomicMin_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 392 |
+
|
| 393 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 394 |
+
unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 395 |
+
|
| 396 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 397 |
+
unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 398 |
+
|
| 399 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 400 |
+
int atomicMax_block(int *address, int val) __DEF_IF_HOST
|
| 401 |
+
|
| 402 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 403 |
+
int atomicMax_system(int *address, int val) __DEF_IF_HOST
|
| 404 |
+
|
| 405 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 406 |
+
long long atomicMax_block(long long *address, long long val) __DEF_IF_HOST
|
| 407 |
+
|
| 408 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 409 |
+
long long atomicMax_system(long long *address, long long val) __DEF_IF_HOST
|
| 410 |
+
|
| 411 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 412 |
+
unsigned int atomicMax_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 413 |
+
|
| 414 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 415 |
+
unsigned int atomicMax_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 416 |
+
|
| 417 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 418 |
+
unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 419 |
+
|
| 420 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 421 |
+
unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 422 |
+
|
| 423 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 424 |
+
unsigned int atomicInc_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 425 |
+
|
| 426 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 427 |
+
unsigned int atomicInc_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 428 |
+
|
| 429 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 430 |
+
unsigned int atomicDec_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 431 |
+
|
| 432 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 433 |
+
unsigned int atomicDec_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 434 |
+
|
| 435 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 436 |
+
int atomicCAS_block(int *address, int compare, int val) __DEF_IF_HOST
|
| 437 |
+
|
| 438 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 439 |
+
int atomicCAS_system(int *address, int compare, int val) __DEF_IF_HOST
|
| 440 |
+
|
| 441 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 442 |
+
unsigned int atomicCAS_block(unsigned int *address, unsigned int compare,
|
| 443 |
+
unsigned int val) __DEF_IF_HOST
|
| 444 |
+
|
| 445 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 446 |
+
unsigned int atomicCAS_system(unsigned int *address, unsigned int compare,
|
| 447 |
+
unsigned int val) __DEF_IF_HOST
|
| 448 |
+
|
| 449 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 450 |
+
unsigned long long int atomicCAS_block(unsigned long long int *address,
|
| 451 |
+
unsigned long long int compare,
|
| 452 |
+
unsigned long long int val) __DEF_IF_HOST
|
| 453 |
+
|
| 454 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 455 |
+
unsigned long long int atomicCAS_system(unsigned long long int *address,
|
| 456 |
+
unsigned long long int compare,
|
| 457 |
+
unsigned long long int val) __DEF_IF_HOST
|
| 458 |
+
|
| 459 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 460 |
+
int atomicAnd_block(int *address, int val) __DEF_IF_HOST
|
| 461 |
+
|
| 462 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 463 |
+
int atomicAnd_system(int *address, int val) __DEF_IF_HOST
|
| 464 |
+
|
| 465 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 466 |
+
long long atomicAnd_block(long long *address, long long val) __DEF_IF_HOST
|
| 467 |
+
|
| 468 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 469 |
+
long long atomicAnd_system(long long *address, long long val) __DEF_IF_HOST
|
| 470 |
+
|
| 471 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 472 |
+
unsigned int atomicAnd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 473 |
+
|
| 474 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 475 |
+
unsigned int atomicAnd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 476 |
+
|
| 477 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 478 |
+
unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 479 |
+
|
| 480 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 481 |
+
unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 482 |
+
|
| 483 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 484 |
+
int atomicOr_block(int *address, int val) __DEF_IF_HOST
|
| 485 |
+
|
| 486 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 487 |
+
int atomicOr_system(int *address, int val) __DEF_IF_HOST
|
| 488 |
+
|
| 489 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 490 |
+
long long atomicOr_block(long long *address, long long val) __DEF_IF_HOST
|
| 491 |
+
|
| 492 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 493 |
+
long long atomicOr_system(long long *address, long long val) __DEF_IF_HOST
|
| 494 |
+
|
| 495 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 496 |
+
unsigned int atomicOr_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 497 |
+
|
| 498 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 499 |
+
unsigned int atomicOr_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 500 |
+
|
| 501 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 502 |
+
unsigned long long atomicOr_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 503 |
+
|
| 504 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 505 |
+
unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 506 |
+
|
| 507 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 508 |
+
int atomicXor_block(int *address, int val) __DEF_IF_HOST
|
| 509 |
+
|
| 510 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 511 |
+
int atomicXor_system(int *address, int val) __DEF_IF_HOST
|
| 512 |
+
|
| 513 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 514 |
+
long long atomicXor_block(long long *address, long long val) __DEF_IF_HOST
|
| 515 |
+
|
| 516 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 517 |
+
long long atomicXor_system(long long *address, long long val) __DEF_IF_HOST
|
| 518 |
+
|
| 519 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 520 |
+
unsigned int atomicXor_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 521 |
+
|
| 522 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 523 |
+
unsigned int atomicXor_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
|
| 524 |
+
|
| 525 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 526 |
+
unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 527 |
+
|
| 528 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 529 |
+
unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
|
| 530 |
+
|
| 531 |
+
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */
|
| 532 |
+
|
| 533 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 534 |
+
|
| 535 |
+
#undef __SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 536 |
+
#undef __DEF_IF_HOST
|
| 537 |
+
|
| 538 |
+
#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
|
| 539 |
+
#include "sm_60_atomic_functions.hpp"
|
| 540 |
+
#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
|
| 541 |
+
|
| 542 |
+
#endif /* !__SM_60_ATOMIC_FUNCTIONS_H__ */
|
| 543 |
+
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp
ADDED
|
@@ -0,0 +1,527 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__SM_60_ATOMIC_FUNCTIONS_HPP__)
|
| 51 |
+
#define __SM_60_ATOMIC_FUNCTIONS_HPP__
|
| 52 |
+
|
| 53 |
+
#if defined(__CUDACC_RTC__)
|
| 54 |
+
#define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__
|
| 55 |
+
#else /* __CUDACC_RTC__ */
|
| 56 |
+
#define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
|
| 57 |
+
#endif /* __CUDACC_RTC__ */
|
| 58 |
+
|
| 59 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 60 |
+
|
| 61 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
|
| 62 |
+
|
| 63 |
+
/*******************************************************************************
|
| 64 |
+
* *
|
| 65 |
+
* *
|
| 66 |
+
* *
|
| 67 |
+
*******************************************************************************/
|
| 68 |
+
|
| 69 |
+
#include "cuda_runtime_api.h"
|
| 70 |
+
|
| 71 |
+
/*******************************************************************************
|
| 72 |
+
* *
|
| 73 |
+
* *
|
| 74 |
+
* *
|
| 75 |
+
*******************************************************************************/
|
| 76 |
+
|
| 77 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val)
|
| 78 |
+
{
|
| 79 |
+
return __dAtomicAdd(address, val);
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 83 |
+
int atomicAdd_block(int *address, int val)
|
| 84 |
+
{
|
| 85 |
+
return __iAtomicAdd_block(address, val);
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 89 |
+
int atomicAdd_system(int *address, int val)
|
| 90 |
+
{
|
| 91 |
+
return __iAtomicAdd_system(address, val);
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 95 |
+
unsigned int atomicAdd_block(unsigned int *address, unsigned int val)
|
| 96 |
+
{
|
| 97 |
+
return __uAtomicAdd_block(address, val);
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 101 |
+
unsigned int atomicAdd_system(unsigned int *address, unsigned int val)
|
| 102 |
+
{
|
| 103 |
+
return __uAtomicAdd_system(address, val);
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 107 |
+
unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val)
|
| 108 |
+
{
|
| 109 |
+
return __ullAtomicAdd_block(address, val);
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 113 |
+
unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val)
|
| 114 |
+
{
|
| 115 |
+
return __ullAtomicAdd_system(address, val);
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 119 |
+
float atomicAdd_block(float *address, float val)
|
| 120 |
+
{
|
| 121 |
+
return __fAtomicAdd_block(address, val);
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 125 |
+
float atomicAdd_system(float *address, float val)
|
| 126 |
+
{
|
| 127 |
+
return __fAtomicAdd_system(address, val);
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 131 |
+
double atomicAdd_block(double *address, double val)
|
| 132 |
+
{
|
| 133 |
+
return __dAtomicAdd_block(address, val);
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 137 |
+
double atomicAdd_system(double *address, double val)
|
| 138 |
+
{
|
| 139 |
+
return __dAtomicAdd_system(address, val);
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 143 |
+
int atomicSub_block(int *address, int val)
|
| 144 |
+
{
|
| 145 |
+
return __iAtomicAdd_block(address, (unsigned int)-(int)val);
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 149 |
+
int atomicSub_system(int *address, int val)
|
| 150 |
+
{
|
| 151 |
+
return __iAtomicAdd_system(address, (unsigned int)-(int)val);
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 155 |
+
unsigned int atomicSub_block(unsigned int *address, unsigned int val)
|
| 156 |
+
{
|
| 157 |
+
return __uAtomicAdd_block(address, (unsigned int)-(int)val);
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 161 |
+
unsigned int atomicSub_system(unsigned int *address, unsigned int val)
|
| 162 |
+
{
|
| 163 |
+
return __uAtomicAdd_system(address, (unsigned int)-(int)val);
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 167 |
+
int atomicExch_block(int *address, int val)
|
| 168 |
+
{
|
| 169 |
+
return __iAtomicExch_block(address, val);
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 173 |
+
int atomicExch_system(int *address, int val)
|
| 174 |
+
{
|
| 175 |
+
return __iAtomicExch_system(address, val);
|
| 176 |
+
}
|
| 177 |
+
|
| 178 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 179 |
+
unsigned int atomicExch_block(unsigned int *address, unsigned int val)
|
| 180 |
+
{
|
| 181 |
+
return __uAtomicExch_block(address, val);
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 185 |
+
unsigned int atomicExch_system(unsigned int *address, unsigned int val)
|
| 186 |
+
{
|
| 187 |
+
return __uAtomicExch_system(address, val);
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 191 |
+
unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val)
|
| 192 |
+
{
|
| 193 |
+
return __ullAtomicExch_block(address, val);
|
| 194 |
+
}
|
| 195 |
+
|
| 196 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 197 |
+
unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val)
|
| 198 |
+
{
|
| 199 |
+
return __ullAtomicExch_system(address, val);
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 203 |
+
float atomicExch_block(float *address, float val)
|
| 204 |
+
{
|
| 205 |
+
return __fAtomicExch_block(address, val);
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 209 |
+
float atomicExch_system(float *address, float val)
|
| 210 |
+
{
|
| 211 |
+
return __fAtomicExch_system(address, val);
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 215 |
+
int atomicMin_block(int *address, int val)
|
| 216 |
+
{
|
| 217 |
+
return __iAtomicMin_block(address, val);
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 221 |
+
int atomicMin_system(int *address, int val)
|
| 222 |
+
{
|
| 223 |
+
return __iAtomicMin_system(address, val);
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 227 |
+
long long atomicMin_block(long long *address, long long val)
|
| 228 |
+
{
|
| 229 |
+
return __illAtomicMin_block(address, val);
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 233 |
+
long long atomicMin_system(long long *address, long long val)
|
| 234 |
+
{
|
| 235 |
+
return __illAtomicMin_system(address, val);
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 239 |
+
unsigned int atomicMin_block(unsigned int *address, unsigned int val)
|
| 240 |
+
{
|
| 241 |
+
return __uAtomicMin_block(address, val);
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 245 |
+
unsigned int atomicMin_system(unsigned int *address, unsigned int val)
|
| 246 |
+
{
|
| 247 |
+
return __uAtomicMin_system(address, val);
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 251 |
+
unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val)
|
| 252 |
+
{
|
| 253 |
+
return __ullAtomicMin_block(address, val);
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 257 |
+
unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val)
|
| 258 |
+
{
|
| 259 |
+
return __ullAtomicMin_system(address, val);
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 263 |
+
int atomicMax_block(int *address, int val)
|
| 264 |
+
{
|
| 265 |
+
return __iAtomicMax_block(address, val);
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 269 |
+
int atomicMax_system(int *address, int val)
|
| 270 |
+
{
|
| 271 |
+
return __iAtomicMax_system(address, val);
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 275 |
+
long long atomicMax_block(long long *address, long long val)
|
| 276 |
+
{
|
| 277 |
+
return __illAtomicMax_block(address, val);
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 281 |
+
long long atomicMax_system(long long *address, long long val)
|
| 282 |
+
{
|
| 283 |
+
return __illAtomicMax_system(address, val);
|
| 284 |
+
}
|
| 285 |
+
|
| 286 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 287 |
+
unsigned int atomicMax_block(unsigned int *address, unsigned int val)
|
| 288 |
+
{
|
| 289 |
+
return __uAtomicMax_block(address, val);
|
| 290 |
+
}
|
| 291 |
+
|
| 292 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 293 |
+
unsigned int atomicMax_system(unsigned int *address, unsigned int val)
|
| 294 |
+
{
|
| 295 |
+
return __uAtomicMax_system(address, val);
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 299 |
+
unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val)
|
| 300 |
+
{
|
| 301 |
+
return __ullAtomicMax_block(address, val);
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 305 |
+
unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val)
|
| 306 |
+
{
|
| 307 |
+
return __ullAtomicMax_system(address, val);
|
| 308 |
+
}
|
| 309 |
+
|
| 310 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 311 |
+
unsigned int atomicInc_block(unsigned int *address, unsigned int val)
|
| 312 |
+
{
|
| 313 |
+
return __uAtomicInc_block(address, val);
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 317 |
+
unsigned int atomicInc_system(unsigned int *address, unsigned int val)
|
| 318 |
+
{
|
| 319 |
+
return __uAtomicInc_system(address, val);
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 323 |
+
unsigned int atomicDec_block(unsigned int *address, unsigned int val)
|
| 324 |
+
{
|
| 325 |
+
return __uAtomicDec_block(address, val);
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 329 |
+
unsigned int atomicDec_system(unsigned int *address, unsigned int val)
|
| 330 |
+
{
|
| 331 |
+
return __uAtomicDec_system(address, val);
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 335 |
+
int atomicCAS_block(int *address, int compare, int val)
|
| 336 |
+
{
|
| 337 |
+
return __iAtomicCAS_block(address, compare, val);
|
| 338 |
+
}
|
| 339 |
+
|
| 340 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 341 |
+
int atomicCAS_system(int *address, int compare, int val)
|
| 342 |
+
{
|
| 343 |
+
return __iAtomicCAS_system(address, compare, val);
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 347 |
+
unsigned int atomicCAS_block(unsigned int *address, unsigned int compare,
|
| 348 |
+
unsigned int val)
|
| 349 |
+
{
|
| 350 |
+
return __uAtomicCAS_block(address, compare, val);
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 354 |
+
unsigned int atomicCAS_system(unsigned int *address, unsigned int compare,
|
| 355 |
+
unsigned int val)
|
| 356 |
+
{
|
| 357 |
+
return __uAtomicCAS_system(address, compare, val);
|
| 358 |
+
}
|
| 359 |
+
|
| 360 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 361 |
+
unsigned long long int atomicCAS_block(unsigned long long int *address,
|
| 362 |
+
unsigned long long int compare,
|
| 363 |
+
unsigned long long int val)
|
| 364 |
+
{
|
| 365 |
+
return __ullAtomicCAS_block(address, compare, val);
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 369 |
+
unsigned long long int atomicCAS_system(unsigned long long int *address,
|
| 370 |
+
unsigned long long int compare,
|
| 371 |
+
unsigned long long int val)
|
| 372 |
+
{
|
| 373 |
+
return __ullAtomicCAS_system(address, compare, val);
|
| 374 |
+
}
|
| 375 |
+
|
| 376 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 377 |
+
int atomicAnd_block(int *address, int val)
|
| 378 |
+
{
|
| 379 |
+
return __iAtomicAnd_block(address, val);
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 383 |
+
int atomicAnd_system(int *address, int val)
|
| 384 |
+
{
|
| 385 |
+
return __iAtomicAnd_system(address, val);
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 389 |
+
long long atomicAnd_block(long long *address, long long val)
|
| 390 |
+
{
|
| 391 |
+
return __llAtomicAnd_block(address, val);
|
| 392 |
+
}
|
| 393 |
+
|
| 394 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 395 |
+
long long atomicAnd_system(long long *address, long long val)
|
| 396 |
+
{
|
| 397 |
+
return __llAtomicAnd_system(address, val);
|
| 398 |
+
}
|
| 399 |
+
|
| 400 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 401 |
+
unsigned int atomicAnd_block(unsigned int *address, unsigned int val)
|
| 402 |
+
{
|
| 403 |
+
return __uAtomicAnd_block(address, val);
|
| 404 |
+
}
|
| 405 |
+
|
| 406 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 407 |
+
unsigned int atomicAnd_system(unsigned int *address, unsigned int val)
|
| 408 |
+
{
|
| 409 |
+
return __uAtomicAnd_system(address, val);
|
| 410 |
+
}
|
| 411 |
+
|
| 412 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 413 |
+
unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val)
|
| 414 |
+
{
|
| 415 |
+
return __ullAtomicAnd_block(address, val);
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 419 |
+
unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val)
|
| 420 |
+
{
|
| 421 |
+
return __ullAtomicAnd_system(address, val);
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 425 |
+
int atomicOr_block(int *address, int val)
|
| 426 |
+
{
|
| 427 |
+
return __iAtomicOr_block(address, val);
|
| 428 |
+
}
|
| 429 |
+
|
| 430 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 431 |
+
int atomicOr_system(int *address, int val)
|
| 432 |
+
{
|
| 433 |
+
return __iAtomicOr_system(address, val);
|
| 434 |
+
}
|
| 435 |
+
|
| 436 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 437 |
+
long long atomicOr_block(long long *address, long long val)
|
| 438 |
+
{
|
| 439 |
+
return __llAtomicOr_block(address, val);
|
| 440 |
+
}
|
| 441 |
+
|
| 442 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 443 |
+
long long atomicOr_system(long long *address, long long val)
|
| 444 |
+
{
|
| 445 |
+
return __llAtomicOr_system(address, val);
|
| 446 |
+
}
|
| 447 |
+
|
| 448 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 449 |
+
unsigned int atomicOr_block(unsigned int *address, unsigned int val)
|
| 450 |
+
{
|
| 451 |
+
return __uAtomicOr_block(address, val);
|
| 452 |
+
}
|
| 453 |
+
|
| 454 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 455 |
+
unsigned int atomicOr_system(unsigned int *address, unsigned int val)
|
| 456 |
+
{
|
| 457 |
+
return __uAtomicOr_system(address, val);
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 461 |
+
unsigned long long atomicOr_block(unsigned long long *address, unsigned long long val)
|
| 462 |
+
{
|
| 463 |
+
return __ullAtomicOr_block(address, val);
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 467 |
+
unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val)
|
| 468 |
+
{
|
| 469 |
+
return __ullAtomicOr_system(address, val);
|
| 470 |
+
}
|
| 471 |
+
|
| 472 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 473 |
+
int atomicXor_block(int *address, int val)
|
| 474 |
+
{
|
| 475 |
+
return __iAtomicXor_block(address, val);
|
| 476 |
+
}
|
| 477 |
+
|
| 478 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 479 |
+
int atomicXor_system(int *address, int val)
|
| 480 |
+
{
|
| 481 |
+
return __iAtomicXor_system(address, val);
|
| 482 |
+
}
|
| 483 |
+
|
| 484 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 485 |
+
long long atomicXor_block(long long *address, long long val)
|
| 486 |
+
{
|
| 487 |
+
return __llAtomicXor_block(address, val);
|
| 488 |
+
}
|
| 489 |
+
|
| 490 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 491 |
+
long long atomicXor_system(long long *address, long long val)
|
| 492 |
+
{
|
| 493 |
+
return __llAtomicXor_system(address, val);
|
| 494 |
+
}
|
| 495 |
+
|
| 496 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 497 |
+
unsigned int atomicXor_block(unsigned int *address, unsigned int val)
|
| 498 |
+
{
|
| 499 |
+
return __uAtomicXor_block(address, val);
|
| 500 |
+
}
|
| 501 |
+
|
| 502 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 503 |
+
unsigned int atomicXor_system(unsigned int *address, unsigned int val)
|
| 504 |
+
{
|
| 505 |
+
return __uAtomicXor_system(address, val);
|
| 506 |
+
}
|
| 507 |
+
|
| 508 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 509 |
+
unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val)
|
| 510 |
+
{
|
| 511 |
+
return __ullAtomicXor_block(address, val);
|
| 512 |
+
}
|
| 513 |
+
|
| 514 |
+
__SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 515 |
+
unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val)
|
| 516 |
+
{
|
| 517 |
+
return __ullAtomicXor_system(address, val);
|
| 518 |
+
}
|
| 519 |
+
|
| 520 |
+
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */
|
| 521 |
+
|
| 522 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 523 |
+
|
| 524 |
+
#undef __SM_60_ATOMIC_FUNCTIONS_DECL__
|
| 525 |
+
|
| 526 |
+
#endif /* !__SM_60_ATOMIC_FUNCTIONS_HPP__ */
|
| 527 |
+
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_61_intrinsics.hpp
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 2016 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__SM_61_INTRINSICS_HPP__)
|
| 51 |
+
#define __SM_61_INTRINSICS_HPP__
|
| 52 |
+
|
| 53 |
+
#if defined(__CUDACC_RTC__)
|
| 54 |
+
#define __SM_61_INTRINSICS_DECL__ __device__
|
| 55 |
+
#else /* !__CUDACC_RTC__ */
|
| 56 |
+
#define __SM_61_INTRINSICS_DECL__ static __device__ __inline__
|
| 57 |
+
#endif /* __CUDACC_RTC__ */
|
| 58 |
+
|
| 59 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 60 |
+
|
| 61 |
+
#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 610
|
| 62 |
+
|
| 63 |
+
/*******************************************************************************
|
| 64 |
+
* *
|
| 65 |
+
* *
|
| 66 |
+
* *
|
| 67 |
+
*******************************************************************************/
|
| 68 |
+
|
| 69 |
+
#include "cuda_runtime_api.h"
|
| 70 |
+
|
| 71 |
+
/*******************************************************************************
|
| 72 |
+
* *
|
| 73 |
+
* Below are implementations of SM-6.1 intrinsics which are included as *
|
| 74 |
+
* source (instead of being built in to the compiler) *
|
| 75 |
+
* *
|
| 76 |
+
*******************************************************************************/
|
| 77 |
+
|
| 78 |
+
// 4a
|
| 79 |
+
__SM_61_INTRINSICS_DECL__ int __dp4a(int srcA, int srcB, int c) {
|
| 80 |
+
int ret;
|
| 81 |
+
asm volatile ("dp4a.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c));
|
| 82 |
+
return ret;
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
__SM_61_INTRINSICS_DECL__ unsigned int __dp4a(unsigned int srcA, unsigned int srcB, unsigned int c) {
|
| 86 |
+
unsigned int ret;
|
| 87 |
+
asm volatile ("dp4a.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c));
|
| 88 |
+
return ret;
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
__SM_61_INTRINSICS_DECL__ int __dp4a(char4 srcA, char4 srcB, int c) {
|
| 92 |
+
int ret;
|
| 93 |
+
asm volatile ("dp4a.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(int *)&srcA), "r"(*(int *)&srcB), "r"(c));
|
| 94 |
+
return ret;
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
__SM_61_INTRINSICS_DECL__ unsigned int __dp4a(uchar4 srcA, uchar4 srcB, unsigned int c) {
|
| 98 |
+
unsigned int ret;
|
| 99 |
+
asm volatile ("dp4a.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(unsigned int *)&srcA), "r"(*(unsigned int *)&srcB), "r"(c));
|
| 100 |
+
return ret;
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
// 2a.lo
|
| 104 |
+
__SM_61_INTRINSICS_DECL__ int __dp2a_lo(int srcA, int srcB, int c) {
|
| 105 |
+
int ret;
|
| 106 |
+
asm volatile ("dp2a.lo.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c));
|
| 107 |
+
return ret;
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_lo(unsigned int srcA, unsigned int srcB, unsigned int c) {
|
| 111 |
+
unsigned int ret;
|
| 112 |
+
asm volatile ("dp2a.lo.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c));
|
| 113 |
+
return ret;
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
__SM_61_INTRINSICS_DECL__ int __dp2a_lo(short2 srcA, char4 srcB, int c) {
|
| 117 |
+
int ret;
|
| 118 |
+
asm volatile ("dp2a.lo.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(int *)&srcA), "r"(*(int *)&srcB), "r"(c));
|
| 119 |
+
return ret;
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_lo(ushort2 srcA, uchar4 srcB, unsigned int c) {
|
| 123 |
+
unsigned int ret;
|
| 124 |
+
asm volatile ("dp2a.lo.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(unsigned int *)&srcA), "r"(*(unsigned int *)&srcB), "r"(c));
|
| 125 |
+
return ret;
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
// 2a.hi
|
| 129 |
+
__SM_61_INTRINSICS_DECL__ int __dp2a_hi(int srcA, int srcB, int c) {
|
| 130 |
+
int ret;
|
| 131 |
+
asm volatile ("dp2a.hi.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c));
|
| 132 |
+
return ret;
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_hi(unsigned int srcA, unsigned int srcB, unsigned int c) {
|
| 136 |
+
unsigned int ret;
|
| 137 |
+
asm volatile ("dp2a.hi.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(srcA), "r"(srcB), "r"(c));
|
| 138 |
+
return ret;
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
__SM_61_INTRINSICS_DECL__ int __dp2a_hi(short2 srcA, char4 srcB, int c) {
|
| 142 |
+
int ret;
|
| 143 |
+
asm volatile ("dp2a.hi.s32.s32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(int *)&srcA), "r"(*(int *)&srcB), "r"(c));
|
| 144 |
+
return ret;
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
__SM_61_INTRINSICS_DECL__ unsigned int __dp2a_hi(ushort2 srcA, uchar4 srcB, unsigned int c) {
|
| 148 |
+
unsigned int ret;
|
| 149 |
+
asm volatile ("dp2a.hi.u32.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(*(unsigned int *)&srcA), "r"(*(unsigned int *)&srcB), "r"(c));
|
| 150 |
+
return ret;
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 610 */
|
| 155 |
+
|
| 156 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 157 |
+
|
| 158 |
+
#undef __SM_61_INTRINSICS_DECL__
|
| 159 |
+
|
| 160 |
+
#endif /* !__SM_61_INTRINSICS_HPP__ */
|
| 161 |
+
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_functions.h
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__SURFACE_FUNCTIONS_H__)
|
| 51 |
+
#define __SURFACE_FUNCTIONS_H__
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 55 |
+
|
| 56 |
+
/*******************************************************************************
|
| 57 |
+
* *
|
| 58 |
+
* *
|
| 59 |
+
* *
|
| 60 |
+
*******************************************************************************/
|
| 61 |
+
|
| 62 |
+
#include "cuda_runtime_api.h"
|
| 63 |
+
#include "cuda_surface_types.h"
|
| 64 |
+
|
| 65 |
+
#if defined(_WIN32)
|
| 66 |
+
# define __DEPRECATED__ __declspec(deprecated)
|
| 67 |
+
#else
|
| 68 |
+
# define __DEPRECATED__ __attribute__((deprecated))
|
| 69 |
+
#endif
|
| 70 |
+
|
| 71 |
+
template <typename T> struct __nv_surf_trait { typedef void * cast_type; };
|
| 72 |
+
|
| 73 |
+
template<> struct __nv_surf_trait<char> { typedef char * cast_type; };
|
| 74 |
+
template<> struct __nv_surf_trait<signed char> { typedef signed char * cast_type; };
|
| 75 |
+
template<> struct __nv_surf_trait<unsigned char> { typedef unsigned char * cast_type; };
|
| 76 |
+
template<> struct __nv_surf_trait<char1> { typedef char1 * cast_type; };
|
| 77 |
+
template<> struct __nv_surf_trait<uchar1> { typedef uchar1 * cast_type; };
|
| 78 |
+
template<> struct __nv_surf_trait<char2> { typedef char2 * cast_type; };
|
| 79 |
+
template<> struct __nv_surf_trait<uchar2> { typedef uchar2 * cast_type; };
|
| 80 |
+
template<> struct __nv_surf_trait<char4> { typedef char4 * cast_type; };
|
| 81 |
+
template<> struct __nv_surf_trait<uchar4> { typedef uchar4 * cast_type; };
|
| 82 |
+
template<> struct __nv_surf_trait<short> { typedef short * cast_type; };
|
| 83 |
+
template<> struct __nv_surf_trait<unsigned short> { typedef unsigned short * cast_type; };
|
| 84 |
+
template<> struct __nv_surf_trait<short1> { typedef short1 * cast_type; };
|
| 85 |
+
template<> struct __nv_surf_trait<ushort1> { typedef ushort1 * cast_type; };
|
| 86 |
+
template<> struct __nv_surf_trait<short2> { typedef short2 * cast_type; };
|
| 87 |
+
template<> struct __nv_surf_trait<ushort2> { typedef ushort2 * cast_type; };
|
| 88 |
+
template<> struct __nv_surf_trait<short4> { typedef short4 * cast_type; };
|
| 89 |
+
template<> struct __nv_surf_trait<ushort4> { typedef ushort4 * cast_type; };
|
| 90 |
+
template<> struct __nv_surf_trait<int> { typedef int * cast_type; };
|
| 91 |
+
template<> struct __nv_surf_trait<unsigned int> { typedef unsigned int * cast_type; };
|
| 92 |
+
template<> struct __nv_surf_trait<int1> { typedef int1 * cast_type; };
|
| 93 |
+
template<> struct __nv_surf_trait<uint1> { typedef uint1 * cast_type; };
|
| 94 |
+
template<> struct __nv_surf_trait<int2> { typedef int2 * cast_type; };
|
| 95 |
+
template<> struct __nv_surf_trait<uint2> { typedef uint2 * cast_type; };
|
| 96 |
+
template<> struct __nv_surf_trait<int4> { typedef int4 * cast_type; };
|
| 97 |
+
template<> struct __nv_surf_trait<uint4> { typedef uint4 * cast_type; };
|
| 98 |
+
template<> struct __nv_surf_trait<long long> { typedef long long * cast_type; };
|
| 99 |
+
template<> struct __nv_surf_trait<unsigned long long> { typedef unsigned long long * cast_type; };
|
| 100 |
+
template<> struct __nv_surf_trait<longlong1> { typedef longlong1 * cast_type; };
|
| 101 |
+
template<> struct __nv_surf_trait<ulonglong1> { typedef ulonglong1 * cast_type; };
|
| 102 |
+
template<> struct __nv_surf_trait<longlong2> { typedef longlong2 * cast_type; };
|
| 103 |
+
template<> struct __nv_surf_trait<ulonglong2> { typedef ulonglong2 * cast_type; };
|
| 104 |
+
#if !defined(__LP64__)
|
| 105 |
+
template<> struct __nv_surf_trait<long> { typedef int * cast_type; };
|
| 106 |
+
template<> struct __nv_surf_trait<unsigned long> { typedef unsigned int * cast_type; };
|
| 107 |
+
template<> struct __nv_surf_trait<long1> { typedef int1 * cast_type; };
|
| 108 |
+
template<> struct __nv_surf_trait<ulong1> { typedef uint1 * cast_type; };
|
| 109 |
+
template<> struct __nv_surf_trait<long2> { typedef int2 * cast_type; };
|
| 110 |
+
template<> struct __nv_surf_trait<ulong2> { typedef uint2 * cast_type; };
|
| 111 |
+
template<> struct __nv_surf_trait<long4> { typedef uint4 * cast_type; };
|
| 112 |
+
template<> struct __nv_surf_trait<ulong4> { typedef int4 * cast_type; };
|
| 113 |
+
#endif
|
| 114 |
+
template<> struct __nv_surf_trait<float> { typedef float * cast_type; };
|
| 115 |
+
template<> struct __nv_surf_trait<float1> { typedef float1 * cast_type; };
|
| 116 |
+
template<> struct __nv_surf_trait<float2> { typedef float2 * cast_type; };
|
| 117 |
+
template<> struct __nv_surf_trait<float4> { typedef float4 * cast_type; };
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
#undef __DEPRECATED__
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 124 |
+
#endif /* !__SURFACE_FUNCTIONS_H__ */
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_indirect_functions.h
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
#ifndef __SURFACE_INDIRECT_FUNCTIONS_H__
|
| 52 |
+
#define __SURFACE_INDIRECT_FUNCTIONS_H__
|
| 53 |
+
|
| 54 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 55 |
+
|
| 56 |
+
#include "cuda_runtime_api.h"
|
| 57 |
+
|
| 58 |
+
template<typename T> struct __nv_isurf_trait { };
|
| 59 |
+
template<> struct __nv_isurf_trait<char> { typedef void type; };
|
| 60 |
+
template<> struct __nv_isurf_trait<signed char> { typedef void type; };
|
| 61 |
+
template<> struct __nv_isurf_trait<char1> { typedef void type; };
|
| 62 |
+
template<> struct __nv_isurf_trait<unsigned char> { typedef void type; };
|
| 63 |
+
template<> struct __nv_isurf_trait<uchar1> { typedef void type; };
|
| 64 |
+
template<> struct __nv_isurf_trait<short> { typedef void type; };
|
| 65 |
+
template<> struct __nv_isurf_trait<short1> { typedef void type; };
|
| 66 |
+
template<> struct __nv_isurf_trait<unsigned short> { typedef void type; };
|
| 67 |
+
template<> struct __nv_isurf_trait<ushort1> { typedef void type; };
|
| 68 |
+
template<> struct __nv_isurf_trait<int> { typedef void type; };
|
| 69 |
+
template<> struct __nv_isurf_trait<int1> { typedef void type; };
|
| 70 |
+
template<> struct __nv_isurf_trait<unsigned int> { typedef void type; };
|
| 71 |
+
template<> struct __nv_isurf_trait<uint1> { typedef void type; };
|
| 72 |
+
template<> struct __nv_isurf_trait<long long> { typedef void type; };
|
| 73 |
+
template<> struct __nv_isurf_trait<longlong1> { typedef void type; };
|
| 74 |
+
template<> struct __nv_isurf_trait<unsigned long long> { typedef void type; };
|
| 75 |
+
template<> struct __nv_isurf_trait<ulonglong1> { typedef void type; };
|
| 76 |
+
template<> struct __nv_isurf_trait<float> { typedef void type; };
|
| 77 |
+
template<> struct __nv_isurf_trait<float1> { typedef void type; };
|
| 78 |
+
|
| 79 |
+
template<> struct __nv_isurf_trait<char2> { typedef void type; };
|
| 80 |
+
template<> struct __nv_isurf_trait<uchar2> { typedef void type; };
|
| 81 |
+
template<> struct __nv_isurf_trait<short2> { typedef void type; };
|
| 82 |
+
template<> struct __nv_isurf_trait<ushort2> { typedef void type; };
|
| 83 |
+
template<> struct __nv_isurf_trait<int2> { typedef void type; };
|
| 84 |
+
template<> struct __nv_isurf_trait<uint2> { typedef void type; };
|
| 85 |
+
template<> struct __nv_isurf_trait<longlong2> { typedef void type; };
|
| 86 |
+
template<> struct __nv_isurf_trait<ulonglong2> { typedef void type; };
|
| 87 |
+
template<> struct __nv_isurf_trait<float2> { typedef void type; };
|
| 88 |
+
|
| 89 |
+
template<> struct __nv_isurf_trait<char4> { typedef void type; };
|
| 90 |
+
template<> struct __nv_isurf_trait<uchar4> { typedef void type; };
|
| 91 |
+
template<> struct __nv_isurf_trait<short4> { typedef void type; };
|
| 92 |
+
template<> struct __nv_isurf_trait<ushort4> { typedef void type; };
|
| 93 |
+
template<> struct __nv_isurf_trait<int4> { typedef void type; };
|
| 94 |
+
template<> struct __nv_isurf_trait<uint4> { typedef void type; };
|
| 95 |
+
template<> struct __nv_isurf_trait<float4> { typedef void type; };
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
template <typename T>
|
| 99 |
+
static __device__ typename __nv_isurf_trait<T>::type surf1Dread(T *ptr, cudaSurfaceObject_t obj, int x, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 100 |
+
{
|
| 101 |
+
__nv_tex_surf_handler("__isurf1Dread", ptr, obj, x, mode);
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
template <class T>
|
| 105 |
+
static __device__ T surf1Dread(cudaSurfaceObject_t surfObject, int x, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
|
| 106 |
+
{
|
| 107 |
+
T ret;
|
| 108 |
+
surf1Dread(&ret, surfObject, x, boundaryMode);
|
| 109 |
+
return ret;
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
template <typename T>
|
| 113 |
+
static __device__ typename __nv_isurf_trait<T>::type surf2Dread(T *ptr, cudaSurfaceObject_t obj, int x, int y, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 114 |
+
{
|
| 115 |
+
__nv_tex_surf_handler("__isurf2Dread", ptr, obj, x, y, mode);
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
template <class T>
|
| 119 |
+
static __device__ T surf2Dread(cudaSurfaceObject_t surfObject, int x, int y, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
|
| 120 |
+
{
|
| 121 |
+
T ret;
|
| 122 |
+
surf2Dread(&ret, surfObject, x, y, boundaryMode);
|
| 123 |
+
return ret;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
template <typename T>
|
| 128 |
+
static __device__ typename __nv_isurf_trait<T>::type surf3Dread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int z, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 129 |
+
{
|
| 130 |
+
__nv_tex_surf_handler("__isurf3Dread", ptr, obj, x, y, z, mode);
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
template <class T>
|
| 134 |
+
static __device__ T surf3Dread(cudaSurfaceObject_t surfObject, int x, int y, int z, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
|
| 135 |
+
{
|
| 136 |
+
T ret;
|
| 137 |
+
surf3Dread(&ret, surfObject, x, y, z, boundaryMode);
|
| 138 |
+
return ret;
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
template <typename T>
|
| 142 |
+
static __device__ typename __nv_isurf_trait<T>::type surf1DLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 143 |
+
{
|
| 144 |
+
__nv_tex_surf_handler("__isurf1DLayeredread", ptr, obj, x, layer, mode);
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
template <class T>
|
| 148 |
+
static __device__ T surf1DLayeredread(cudaSurfaceObject_t surfObject, int x, int layer, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
|
| 149 |
+
{
|
| 150 |
+
T ret;
|
| 151 |
+
surf1DLayeredread(&ret, surfObject, x, layer, boundaryMode);
|
| 152 |
+
return ret;
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
template <typename T>
|
| 156 |
+
static __device__ typename __nv_isurf_trait<T>::type surf2DLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 157 |
+
{
|
| 158 |
+
__nv_tex_surf_handler("__isurf2DLayeredread", ptr, obj, x, y, layer, mode);
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
template <class T>
|
| 162 |
+
static __device__ T surf2DLayeredread(cudaSurfaceObject_t surfObject, int x, int y, int layer, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
|
| 163 |
+
{
|
| 164 |
+
T ret;
|
| 165 |
+
surf2DLayeredread(&ret, surfObject, x, y, layer, boundaryMode);
|
| 166 |
+
return ret;
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
template <typename T>
|
| 170 |
+
static __device__ typename __nv_isurf_trait<T>::type surfCubemapread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int face, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 171 |
+
{
|
| 172 |
+
__nv_tex_surf_handler("__isurfCubemapread", ptr, obj, x, y, face, mode);
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
template <class T>
|
| 176 |
+
static __device__ T surfCubemapread(cudaSurfaceObject_t surfObject, int x, int y, int face, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
|
| 177 |
+
{
|
| 178 |
+
T ret;
|
| 179 |
+
surfCubemapread(&ret, surfObject, x, y, face, boundaryMode);
|
| 180 |
+
return ret;
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
template <typename T>
|
| 184 |
+
static __device__ typename __nv_isurf_trait<T>::type surfCubemapLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int layerface, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 185 |
+
{
|
| 186 |
+
__nv_tex_surf_handler("__isurfCubemapLayeredread", ptr, obj, x, y, layerface, mode);
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
template <class T>
|
| 190 |
+
static __device__ T surfCubemapLayeredread(cudaSurfaceObject_t surfObject, int x, int y, int layerface, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
|
| 191 |
+
{
|
| 192 |
+
T ret;
|
| 193 |
+
surfCubemapLayeredread(&ret, surfObject, x, y, layerface, boundaryMode);
|
| 194 |
+
return ret;
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
template <typename T>
|
| 198 |
+
static __device__ typename __nv_isurf_trait<T>::type surf1Dwrite(T val, cudaSurfaceObject_t obj, int x, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 199 |
+
{
|
| 200 |
+
__nv_tex_surf_handler("__isurf1Dwrite_v2", &val, obj, x, mode);
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
template <typename T>
|
| 204 |
+
static __device__ typename __nv_isurf_trait<T>::type surf2Dwrite(T val, cudaSurfaceObject_t obj, int x, int y, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 205 |
+
{
|
| 206 |
+
__nv_tex_surf_handler("__isurf2Dwrite_v2", &val, obj, x, y, mode);
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
template <typename T>
|
| 210 |
+
static __device__ typename __nv_isurf_trait<T>::type surf3Dwrite(T val, cudaSurfaceObject_t obj, int x, int y, int z, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 211 |
+
{
|
| 212 |
+
__nv_tex_surf_handler("__isurf3Dwrite_v2", &val, obj, x, y, z, mode);
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
template <typename T>
|
| 216 |
+
static __device__ typename __nv_isurf_trait<T>::type surf1DLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 217 |
+
{
|
| 218 |
+
__nv_tex_surf_handler("__isurf1DLayeredwrite_v2", &val, obj, x, layer, mode);
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
template <typename T>
|
| 222 |
+
static __device__ typename __nv_isurf_trait<T>::type surf2DLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int y, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 223 |
+
{
|
| 224 |
+
__nv_tex_surf_handler("__isurf2DLayeredwrite_v2", &val, obj, x, y, layer, mode);
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
template <typename T>
|
| 228 |
+
static __device__ typename __nv_isurf_trait<T>::type surfCubemapwrite(T val, cudaSurfaceObject_t obj, int x, int y, int face, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 229 |
+
{
|
| 230 |
+
__nv_tex_surf_handler("__isurfCubemapwrite_v2", &val, obj, x, y, face, mode);
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
template <typename T>
|
| 234 |
+
static __device__ typename __nv_isurf_trait<T>::type surfCubemapLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int y, int layerface, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
|
| 235 |
+
{
|
| 236 |
+
__nv_tex_surf_handler("__isurfCubemapLayeredwrite_v2", &val, obj, x, y, layerface, mode);
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
#endif // __cplusplus && __CUDACC__
|
| 240 |
+
|
| 241 |
+
#endif // __SURFACE_INDIRECT_FUNCTIONS_H__
|
| 242 |
+
|
| 243 |
+
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_fetch_functions.h
ADDED
|
@@ -0,0 +1,223 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__TEXTURE_FETCH_FUNCTIONS_H__)
|
| 51 |
+
#define __TEXTURE_FETCH_FUNCTIONS_H__
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 55 |
+
|
| 56 |
+
/*******************************************************************************
|
| 57 |
+
* *
|
| 58 |
+
* *
|
| 59 |
+
* *
|
| 60 |
+
*******************************************************************************/
|
| 61 |
+
|
| 62 |
+
#include "cuda_runtime_api.h"
|
| 63 |
+
#include "cuda_texture_types.h"
|
| 64 |
+
|
| 65 |
+
#if defined(_WIN32)
|
| 66 |
+
# define __DEPRECATED__ __declspec(deprecated)
|
| 67 |
+
#else
|
| 68 |
+
# define __DEPRECATED__ __attribute__((deprecated))
|
| 69 |
+
#endif
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
template <typename T>
|
| 73 |
+
struct __nv_tex_rmet_ret { };
|
| 74 |
+
|
| 75 |
+
template<> struct __nv_tex_rmet_ret<char> { typedef char type; };
|
| 76 |
+
template<> struct __nv_tex_rmet_ret<signed char> { typedef signed char type; };
|
| 77 |
+
template<> struct __nv_tex_rmet_ret<unsigned char> { typedef unsigned char type; };
|
| 78 |
+
template<> struct __nv_tex_rmet_ret<char1> { typedef char1 type; };
|
| 79 |
+
template<> struct __nv_tex_rmet_ret<uchar1> { typedef uchar1 type; };
|
| 80 |
+
template<> struct __nv_tex_rmet_ret<char2> { typedef char2 type; };
|
| 81 |
+
template<> struct __nv_tex_rmet_ret<uchar2> { typedef uchar2 type; };
|
| 82 |
+
template<> struct __nv_tex_rmet_ret<char4> { typedef char4 type; };
|
| 83 |
+
template<> struct __nv_tex_rmet_ret<uchar4> { typedef uchar4 type; };
|
| 84 |
+
|
| 85 |
+
template<> struct __nv_tex_rmet_ret<short> { typedef short type; };
|
| 86 |
+
template<> struct __nv_tex_rmet_ret<unsigned short> { typedef unsigned short type; };
|
| 87 |
+
template<> struct __nv_tex_rmet_ret<short1> { typedef short1 type; };
|
| 88 |
+
template<> struct __nv_tex_rmet_ret<ushort1> { typedef ushort1 type; };
|
| 89 |
+
template<> struct __nv_tex_rmet_ret<short2> { typedef short2 type; };
|
| 90 |
+
template<> struct __nv_tex_rmet_ret<ushort2> { typedef ushort2 type; };
|
| 91 |
+
template<> struct __nv_tex_rmet_ret<short4> { typedef short4 type; };
|
| 92 |
+
template<> struct __nv_tex_rmet_ret<ushort4> { typedef ushort4 type; };
|
| 93 |
+
|
| 94 |
+
template<> struct __nv_tex_rmet_ret<int> { typedef int type; };
|
| 95 |
+
template<> struct __nv_tex_rmet_ret<unsigned int> { typedef unsigned int type; };
|
| 96 |
+
template<> struct __nv_tex_rmet_ret<int1> { typedef int1 type; };
|
| 97 |
+
template<> struct __nv_tex_rmet_ret<uint1> { typedef uint1 type; };
|
| 98 |
+
template<> struct __nv_tex_rmet_ret<int2> { typedef int2 type; };
|
| 99 |
+
template<> struct __nv_tex_rmet_ret<uint2> { typedef uint2 type; };
|
| 100 |
+
template<> struct __nv_tex_rmet_ret<int4> { typedef int4 type; };
|
| 101 |
+
template<> struct __nv_tex_rmet_ret<uint4> { typedef uint4 type; };
|
| 102 |
+
|
| 103 |
+
#if !defined(__LP64__)
|
| 104 |
+
template<> struct __nv_tex_rmet_ret<long> { typedef long type; };
|
| 105 |
+
template<> struct __nv_tex_rmet_ret<unsigned long> { typedef unsigned long type; };
|
| 106 |
+
template<> struct __nv_tex_rmet_ret<long1> { typedef long1 type; };
|
| 107 |
+
template<> struct __nv_tex_rmet_ret<ulong1> { typedef ulong1 type; };
|
| 108 |
+
template<> struct __nv_tex_rmet_ret<long2> { typedef long2 type; };
|
| 109 |
+
template<> struct __nv_tex_rmet_ret<ulong2> { typedef ulong2 type; };
|
| 110 |
+
template<> struct __nv_tex_rmet_ret<long4> { typedef long4 type; };
|
| 111 |
+
template<> struct __nv_tex_rmet_ret<ulong4> { typedef ulong4 type; };
|
| 112 |
+
#endif /* !__LP64__ */
|
| 113 |
+
template<> struct __nv_tex_rmet_ret<float> { typedef float type; };
|
| 114 |
+
template<> struct __nv_tex_rmet_ret<float1> { typedef float1 type; };
|
| 115 |
+
template<> struct __nv_tex_rmet_ret<float2> { typedef float2 type; };
|
| 116 |
+
template<> struct __nv_tex_rmet_ret<float4> { typedef float4 type; };
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
template <typename T> struct __nv_tex_rmet_cast { typedef T* type; };
|
| 120 |
+
#if !defined(__LP64__)
|
| 121 |
+
template<> struct __nv_tex_rmet_cast<long> { typedef int *type; };
|
| 122 |
+
template<> struct __nv_tex_rmet_cast<unsigned long> { typedef unsigned int *type; };
|
| 123 |
+
template<> struct __nv_tex_rmet_cast<long1> { typedef int1 *type; };
|
| 124 |
+
template<> struct __nv_tex_rmet_cast<ulong1> { typedef uint1 *type; };
|
| 125 |
+
template<> struct __nv_tex_rmet_cast<long2> { typedef int2 *type; };
|
| 126 |
+
template<> struct __nv_tex_rmet_cast<ulong2> { typedef uint2 *type; };
|
| 127 |
+
template<> struct __nv_tex_rmet_cast<long4> { typedef int4 *type; };
|
| 128 |
+
template<> struct __nv_tex_rmet_cast<ulong4> { typedef uint4 *type; };
|
| 129 |
+
#endif /* !__LP64__ */
|
| 130 |
+
|
| 131 |
+
template <typename T>
|
| 132 |
+
struct __nv_tex_rmnf_ret { };
|
| 133 |
+
|
| 134 |
+
template <> struct __nv_tex_rmnf_ret<char> { typedef float type; };
|
| 135 |
+
template <> struct __nv_tex_rmnf_ret<signed char> { typedef float type; };
|
| 136 |
+
template <> struct __nv_tex_rmnf_ret<unsigned char> { typedef float type; };
|
| 137 |
+
template <> struct __nv_tex_rmnf_ret<short> { typedef float type; };
|
| 138 |
+
template <> struct __nv_tex_rmnf_ret<unsigned short> { typedef float type; };
|
| 139 |
+
template <> struct __nv_tex_rmnf_ret<char1> { typedef float1 type; };
|
| 140 |
+
template <> struct __nv_tex_rmnf_ret<uchar1> { typedef float1 type; };
|
| 141 |
+
template <> struct __nv_tex_rmnf_ret<short1> { typedef float1 type; };
|
| 142 |
+
template <> struct __nv_tex_rmnf_ret<ushort1> { typedef float1 type; };
|
| 143 |
+
template <> struct __nv_tex_rmnf_ret<char2> { typedef float2 type; };
|
| 144 |
+
template <> struct __nv_tex_rmnf_ret<uchar2> { typedef float2 type; };
|
| 145 |
+
template <> struct __nv_tex_rmnf_ret<short2> { typedef float2 type; };
|
| 146 |
+
template <> struct __nv_tex_rmnf_ret<ushort2> { typedef float2 type; };
|
| 147 |
+
template <> struct __nv_tex_rmnf_ret<char4> { typedef float4 type; };
|
| 148 |
+
template <> struct __nv_tex_rmnf_ret<uchar4> { typedef float4 type; };
|
| 149 |
+
template <> struct __nv_tex_rmnf_ret<short4> { typedef float4 type; };
|
| 150 |
+
template <> struct __nv_tex_rmnf_ret<ushort4> { typedef float4 type; };
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
template <typename T>
|
| 154 |
+
struct __nv_tex2dgather_ret { };
|
| 155 |
+
template <> struct __nv_tex2dgather_ret<char> { typedef char4 type; };
|
| 156 |
+
template <> struct __nv_tex2dgather_ret<signed char> { typedef char4 type; };
|
| 157 |
+
template <> struct __nv_tex2dgather_ret<char1> { typedef char4 type; };
|
| 158 |
+
template <> struct __nv_tex2dgather_ret<char2> { typedef char4 type; };
|
| 159 |
+
template <> struct __nv_tex2dgather_ret<char3> { typedef char4 type; };
|
| 160 |
+
template <> struct __nv_tex2dgather_ret<char4> { typedef char4 type; };
|
| 161 |
+
template <> struct __nv_tex2dgather_ret<unsigned char> { typedef uchar4 type; };
|
| 162 |
+
template <> struct __nv_tex2dgather_ret<uchar1> { typedef uchar4 type; };
|
| 163 |
+
template <> struct __nv_tex2dgather_ret<uchar2> { typedef uchar4 type; };
|
| 164 |
+
template <> struct __nv_tex2dgather_ret<uchar3> { typedef uchar4 type; };
|
| 165 |
+
template <> struct __nv_tex2dgather_ret<uchar4> { typedef uchar4 type; };
|
| 166 |
+
|
| 167 |
+
template <> struct __nv_tex2dgather_ret<short> { typedef short4 type; };
|
| 168 |
+
template <> struct __nv_tex2dgather_ret<short1> { typedef short4 type; };
|
| 169 |
+
template <> struct __nv_tex2dgather_ret<short2> { typedef short4 type; };
|
| 170 |
+
template <> struct __nv_tex2dgather_ret<short3> { typedef short4 type; };
|
| 171 |
+
template <> struct __nv_tex2dgather_ret<short4> { typedef short4 type; };
|
| 172 |
+
template <> struct __nv_tex2dgather_ret<unsigned short> { typedef ushort4 type; };
|
| 173 |
+
template <> struct __nv_tex2dgather_ret<ushort1> { typedef ushort4 type; };
|
| 174 |
+
template <> struct __nv_tex2dgather_ret<ushort2> { typedef ushort4 type; };
|
| 175 |
+
template <> struct __nv_tex2dgather_ret<ushort3> { typedef ushort4 type; };
|
| 176 |
+
template <> struct __nv_tex2dgather_ret<ushort4> { typedef ushort4 type; };
|
| 177 |
+
|
| 178 |
+
template <> struct __nv_tex2dgather_ret<int> { typedef int4 type; };
|
| 179 |
+
template <> struct __nv_tex2dgather_ret<int1> { typedef int4 type; };
|
| 180 |
+
template <> struct __nv_tex2dgather_ret<int2> { typedef int4 type; };
|
| 181 |
+
template <> struct __nv_tex2dgather_ret<int3> { typedef int4 type; };
|
| 182 |
+
template <> struct __nv_tex2dgather_ret<int4> { typedef int4 type; };
|
| 183 |
+
template <> struct __nv_tex2dgather_ret<unsigned int> { typedef uint4 type; };
|
| 184 |
+
template <> struct __nv_tex2dgather_ret<uint1> { typedef uint4 type; };
|
| 185 |
+
template <> struct __nv_tex2dgather_ret<uint2> { typedef uint4 type; };
|
| 186 |
+
template <> struct __nv_tex2dgather_ret<uint3> { typedef uint4 type; };
|
| 187 |
+
template <> struct __nv_tex2dgather_ret<uint4> { typedef uint4 type; };
|
| 188 |
+
|
| 189 |
+
template <> struct __nv_tex2dgather_ret<float> { typedef float4 type; };
|
| 190 |
+
template <> struct __nv_tex2dgather_ret<float1> { typedef float4 type; };
|
| 191 |
+
template <> struct __nv_tex2dgather_ret<float2> { typedef float4 type; };
|
| 192 |
+
template <> struct __nv_tex2dgather_ret<float3> { typedef float4 type; };
|
| 193 |
+
template <> struct __nv_tex2dgather_ret<float4> { typedef float4 type; };
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
template<typename T> struct __nv_tex2dgather_rmnf_ret { };
|
| 197 |
+
template<> struct __nv_tex2dgather_rmnf_ret<char> { typedef float4 type; };
|
| 198 |
+
template<> struct __nv_tex2dgather_rmnf_ret<signed char> { typedef float4 type; };
|
| 199 |
+
template<> struct __nv_tex2dgather_rmnf_ret<unsigned char> { typedef float4 type; };
|
| 200 |
+
template<> struct __nv_tex2dgather_rmnf_ret<char1> { typedef float4 type; };
|
| 201 |
+
template<> struct __nv_tex2dgather_rmnf_ret<uchar1> { typedef float4 type; };
|
| 202 |
+
template<> struct __nv_tex2dgather_rmnf_ret<char2> { typedef float4 type; };
|
| 203 |
+
template<> struct __nv_tex2dgather_rmnf_ret<uchar2> { typedef float4 type; };
|
| 204 |
+
template<> struct __nv_tex2dgather_rmnf_ret<char3> { typedef float4 type; };
|
| 205 |
+
template<> struct __nv_tex2dgather_rmnf_ret<uchar3> { typedef float4 type; };
|
| 206 |
+
template<> struct __nv_tex2dgather_rmnf_ret<char4> { typedef float4 type; };
|
| 207 |
+
template<> struct __nv_tex2dgather_rmnf_ret<uchar4> { typedef float4 type; };
|
| 208 |
+
template<> struct __nv_tex2dgather_rmnf_ret<signed short> { typedef float4 type; };
|
| 209 |
+
template<> struct __nv_tex2dgather_rmnf_ret<unsigned short> { typedef float4 type; };
|
| 210 |
+
template<> struct __nv_tex2dgather_rmnf_ret<short1> { typedef float4 type; };
|
| 211 |
+
template<> struct __nv_tex2dgather_rmnf_ret<ushort1> { typedef float4 type; };
|
| 212 |
+
template<> struct __nv_tex2dgather_rmnf_ret<short2> { typedef float4 type; };
|
| 213 |
+
template<> struct __nv_tex2dgather_rmnf_ret<ushort2> { typedef float4 type; };
|
| 214 |
+
template<> struct __nv_tex2dgather_rmnf_ret<short3> { typedef float4 type; };
|
| 215 |
+
template<> struct __nv_tex2dgather_rmnf_ret<ushort3> { typedef float4 type; };
|
| 216 |
+
template<> struct __nv_tex2dgather_rmnf_ret<short4> { typedef float4 type; };
|
| 217 |
+
template<> struct __nv_tex2dgather_rmnf_ret<ushort4> { typedef float4 type; };
|
| 218 |
+
|
| 219 |
+
#undef __DEPRECATED__
|
| 220 |
+
|
| 221 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 222 |
+
|
| 223 |
+
#endif /* !__TEXTURE_FETCH_FUNCTIONS_H__ */
|
omnilmm/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__VECTOR_FUNCTIONS_H__)
|
| 51 |
+
#define __VECTOR_FUNCTIONS_H__
|
| 52 |
+
|
| 53 |
+
/*******************************************************************************
|
| 54 |
+
* *
|
| 55 |
+
* *
|
| 56 |
+
* *
|
| 57 |
+
*******************************************************************************/
|
| 58 |
+
|
| 59 |
+
#include "cuda_runtime_api.h"
|
| 60 |
+
|
| 61 |
+
#if defined(__CUDACC_RTC__)
|
| 62 |
+
#define __VECTOR_FUNCTIONS_DECL__ __host__ __device__
|
| 63 |
+
#else /* !__CUDACC_RTC__ */
|
| 64 |
+
#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__
|
| 65 |
+
#endif /* __CUDACC_RTC__ */
|
| 66 |
+
|
| 67 |
+
/*******************************************************************************
|
| 68 |
+
* *
|
| 69 |
+
* *
|
| 70 |
+
* *
|
| 71 |
+
*******************************************************************************/
|
| 72 |
+
|
| 73 |
+
__VECTOR_FUNCTIONS_DECL__ char1 make_char1(signed char x);
|
| 74 |
+
|
| 75 |
+
__VECTOR_FUNCTIONS_DECL__ uchar1 make_uchar1(unsigned char x);
|
| 76 |
+
|
| 77 |
+
__VECTOR_FUNCTIONS_DECL__ char2 make_char2(signed char x, signed char y);
|
| 78 |
+
|
| 79 |
+
__VECTOR_FUNCTIONS_DECL__ uchar2 make_uchar2(unsigned char x, unsigned char y);
|
| 80 |
+
|
| 81 |
+
__VECTOR_FUNCTIONS_DECL__ char3 make_char3(signed char x, signed char y, signed char z);
|
| 82 |
+
|
| 83 |
+
__VECTOR_FUNCTIONS_DECL__ uchar3 make_uchar3(unsigned char x, unsigned char y, unsigned char z);
|
| 84 |
+
|
| 85 |
+
__VECTOR_FUNCTIONS_DECL__ char4 make_char4(signed char x, signed char y, signed char z, signed char w);
|
| 86 |
+
|
| 87 |
+
__VECTOR_FUNCTIONS_DECL__ uchar4 make_uchar4(unsigned char x, unsigned char y, unsigned char z, unsigned char w);
|
| 88 |
+
|
| 89 |
+
__VECTOR_FUNCTIONS_DECL__ short1 make_short1(short x);
|
| 90 |
+
|
| 91 |
+
__VECTOR_FUNCTIONS_DECL__ ushort1 make_ushort1(unsigned short x);
|
| 92 |
+
|
| 93 |
+
__VECTOR_FUNCTIONS_DECL__ short2 make_short2(short x, short y);
|
| 94 |
+
|
| 95 |
+
__VECTOR_FUNCTIONS_DECL__ ushort2 make_ushort2(unsigned short x, unsigned short y);
|
| 96 |
+
|
| 97 |
+
__VECTOR_FUNCTIONS_DECL__ short3 make_short3(short x,short y, short z);
|
| 98 |
+
|
| 99 |
+
__VECTOR_FUNCTIONS_DECL__ ushort3 make_ushort3(unsigned short x, unsigned short y, unsigned short z);
|
| 100 |
+
|
| 101 |
+
__VECTOR_FUNCTIONS_DECL__ short4 make_short4(short x, short y, short z, short w);
|
| 102 |
+
|
| 103 |
+
__VECTOR_FUNCTIONS_DECL__ ushort4 make_ushort4(unsigned short x, unsigned short y, unsigned short z, unsigned short w);
|
| 104 |
+
|
| 105 |
+
__VECTOR_FUNCTIONS_DECL__ int1 make_int1(int x);
|
| 106 |
+
|
| 107 |
+
__VECTOR_FUNCTIONS_DECL__ uint1 make_uint1(unsigned int x);
|
| 108 |
+
|
| 109 |
+
__VECTOR_FUNCTIONS_DECL__ int2 make_int2(int x, int y);
|
| 110 |
+
|
| 111 |
+
__VECTOR_FUNCTIONS_DECL__ uint2 make_uint2(unsigned int x, unsigned int y);
|
| 112 |
+
|
| 113 |
+
__VECTOR_FUNCTIONS_DECL__ int3 make_int3(int x, int y, int z);
|
| 114 |
+
|
| 115 |
+
__VECTOR_FUNCTIONS_DECL__ uint3 make_uint3(unsigned int x, unsigned int y, unsigned int z);
|
| 116 |
+
|
| 117 |
+
__VECTOR_FUNCTIONS_DECL__ int4 make_int4(int x, int y, int z, int w);
|
| 118 |
+
|
| 119 |
+
__VECTOR_FUNCTIONS_DECL__ uint4 make_uint4(unsigned int x, unsigned int y, unsigned int z, unsigned int w);
|
| 120 |
+
|
| 121 |
+
__VECTOR_FUNCTIONS_DECL__ long1 make_long1(long int x);
|
| 122 |
+
|
| 123 |
+
__VECTOR_FUNCTIONS_DECL__ ulong1 make_ulong1(unsigned long int x);
|
| 124 |
+
|
| 125 |
+
__VECTOR_FUNCTIONS_DECL__ long2 make_long2(long int x, long int y);
|
| 126 |
+
|
| 127 |
+
__VECTOR_FUNCTIONS_DECL__ ulong2 make_ulong2(unsigned long int x, unsigned long int y);
|
| 128 |
+
|
| 129 |
+
__VECTOR_FUNCTIONS_DECL__ long3 make_long3(long int x, long int y, long int z);
|
| 130 |
+
|
| 131 |
+
__VECTOR_FUNCTIONS_DECL__ ulong3 make_ulong3(unsigned long int x, unsigned long int y, unsigned long int z);
|
| 132 |
+
|
| 133 |
+
__VECTOR_FUNCTIONS_DECL__ long4 make_long4(long int x, long int y, long int z, long int w);
|
| 134 |
+
|
| 135 |
+
__VECTOR_FUNCTIONS_DECL__ ulong4 make_ulong4(unsigned long int x, unsigned long int y, unsigned long int z, unsigned long int w);
|
| 136 |
+
|
| 137 |
+
__VECTOR_FUNCTIONS_DECL__ float1 make_float1(float x);
|
| 138 |
+
|
| 139 |
+
__VECTOR_FUNCTIONS_DECL__ float2 make_float2(float x, float y);
|
| 140 |
+
|
| 141 |
+
__VECTOR_FUNCTIONS_DECL__ float3 make_float3(float x, float y, float z);
|
| 142 |
+
|
| 143 |
+
__VECTOR_FUNCTIONS_DECL__ float4 make_float4(float x, float y, float z, float w);
|
| 144 |
+
|
| 145 |
+
__VECTOR_FUNCTIONS_DECL__ longlong1 make_longlong1(long long int x);
|
| 146 |
+
|
| 147 |
+
__VECTOR_FUNCTIONS_DECL__ ulonglong1 make_ulonglong1(unsigned long long int x);
|
| 148 |
+
|
| 149 |
+
__VECTOR_FUNCTIONS_DECL__ longlong2 make_longlong2(long long int x, long long int y);
|
| 150 |
+
|
| 151 |
+
__VECTOR_FUNCTIONS_DECL__ ulonglong2 make_ulonglong2(unsigned long long int x, unsigned long long int y);
|
| 152 |
+
|
| 153 |
+
__VECTOR_FUNCTIONS_DECL__ longlong3 make_longlong3(long long int x, long long int y, long long int z);
|
| 154 |
+
|
| 155 |
+
__VECTOR_FUNCTIONS_DECL__ ulonglong3 make_ulonglong3(unsigned long long int x, unsigned long long int y, unsigned long long int z);
|
| 156 |
+
|
| 157 |
+
__VECTOR_FUNCTIONS_DECL__ longlong4 make_longlong4(long long int x, long long int y, long long int z, long long int w);
|
| 158 |
+
|
| 159 |
+
__VECTOR_FUNCTIONS_DECL__ ulonglong4 make_ulonglong4(unsigned long long int x, unsigned long long int y, unsigned long long int z, unsigned long long int w);
|
| 160 |
+
|
| 161 |
+
__VECTOR_FUNCTIONS_DECL__ double1 make_double1(double x);
|
| 162 |
+
|
| 163 |
+
__VECTOR_FUNCTIONS_DECL__ double2 make_double2(double x, double y);
|
| 164 |
+
|
| 165 |
+
__VECTOR_FUNCTIONS_DECL__ double3 make_double3(double x, double y, double z);
|
| 166 |
+
|
| 167 |
+
__VECTOR_FUNCTIONS_DECL__ double4 make_double4(double x, double y, double z, double w);
|
| 168 |
+
|
| 169 |
+
#undef __VECTOR_FUNCTIONS_DECL__
|
| 170 |
+
|
| 171 |
+
#if !defined(__CUDACC_RTC__)
|
| 172 |
+
#include "vector_functions.hpp"
|
| 173 |
+
#endif /* !__CUDACC_RTC__ */
|
| 174 |
+
|
| 175 |
+
#endif /* !__VECTOR_FUNCTIONS_H__ */
|
omnilmm/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.8
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c89d9c871d0f1d1b3d1037234a66ebb09f990e3d5324c5025d45835d7d82d462
|
| 3 |
+
size 70922856
|
wemm/lib/python3.10/site-packages/sympy/physics/__pycache__/paulialgebra.cpython-310.pyc
ADDED
|
Binary file (6.13 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/sympy/physics/__pycache__/sho.cpython-310.pyc
ADDED
|
Binary file (2.79 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/sympy/physics/__pycache__/wigner.cpython-310.pyc
ADDED
|
Binary file (37.9 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/sympy/physics/biomechanics/__init__.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Biomechanics extension for SymPy.
|
| 2 |
+
|
| 3 |
+
Includes biomechanics-related constructs which allows users to extend multibody
|
| 4 |
+
models created using `sympy.physics.mechanics` into biomechanical or
|
| 5 |
+
musculoskeletal models involding musculotendons and activation dynamics.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from .activation import (
|
| 10 |
+
ActivationBase,
|
| 11 |
+
FirstOrderActivationDeGroote2016,
|
| 12 |
+
ZerothOrderActivation,
|
| 13 |
+
)
|
| 14 |
+
from .curve import (
|
| 15 |
+
CharacteristicCurveCollection,
|
| 16 |
+
CharacteristicCurveFunction,
|
| 17 |
+
FiberForceLengthActiveDeGroote2016,
|
| 18 |
+
FiberForceLengthPassiveDeGroote2016,
|
| 19 |
+
FiberForceLengthPassiveInverseDeGroote2016,
|
| 20 |
+
FiberForceVelocityDeGroote2016,
|
| 21 |
+
FiberForceVelocityInverseDeGroote2016,
|
| 22 |
+
TendonForceLengthDeGroote2016,
|
| 23 |
+
TendonForceLengthInverseDeGroote2016,
|
| 24 |
+
)
|
| 25 |
+
from .musculotendon import (
|
| 26 |
+
MusculotendonBase,
|
| 27 |
+
MusculotendonDeGroote2016,
|
| 28 |
+
MusculotendonFormulation,
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
__all__ = [
|
| 33 |
+
# Musculotendon characteristic curve functions
|
| 34 |
+
'CharacteristicCurveCollection',
|
| 35 |
+
'CharacteristicCurveFunction',
|
| 36 |
+
'FiberForceLengthActiveDeGroote2016',
|
| 37 |
+
'FiberForceLengthPassiveDeGroote2016',
|
| 38 |
+
'FiberForceLengthPassiveInverseDeGroote2016',
|
| 39 |
+
'FiberForceVelocityDeGroote2016',
|
| 40 |
+
'FiberForceVelocityInverseDeGroote2016',
|
| 41 |
+
'TendonForceLengthDeGroote2016',
|
| 42 |
+
'TendonForceLengthInverseDeGroote2016',
|
| 43 |
+
|
| 44 |
+
# Activation dynamics classes
|
| 45 |
+
'ActivationBase',
|
| 46 |
+
'FirstOrderActivationDeGroote2016',
|
| 47 |
+
'ZerothOrderActivation',
|
| 48 |
+
|
| 49 |
+
# Musculotendon classes
|
| 50 |
+
'MusculotendonBase',
|
| 51 |
+
'MusculotendonDeGroote2016',
|
| 52 |
+
'MusculotendonFormulation',
|
| 53 |
+
]
|
wemm/lib/python3.10/site-packages/sympy/physics/biomechanics/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.22 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/sympy/physics/biomechanics/__pycache__/_mixin.cpython-310.pyc
ADDED
|
Binary file (1.7 kB). View file
|
|
|