Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete list.
- .gitattributes +5 -0
- miniCUDA124/bin/cudafe++.exe +3 -0
- miniCUDA124/bin/cuinj64_124.dll +3 -0
- miniCUDA124/bin/cuobjdump.exe +3 -0
- miniCUDA124/bin/nppial64_12.dll +3 -0
- miniCUDA124/bin/nppicc64_12.dll +3 -0
- miniCUDA124/include/CL/cl_gl.h +372 -0
- miniCUDA124/include/CL/cl_gl_ext.h +18 -0
- miniCUDA124/include/CL/cl_platform.h +1478 -0
- miniCUDA124/include/CL/cl_version.h +81 -0
- miniCUDA124/include/CL/opencl.h +32 -0
- miniCUDA124/include/cooperative_groups/memcpy_async.h +62 -0
- miniCUDA124/include/cooperative_groups/reduce.h +63 -0
- miniCUDA124/include/cooperative_groups/scan.h +63 -0
- miniCUDA124/include/crt/common_functions.h +310 -0
- miniCUDA124/include/crt/cudacc_ext.h +64 -0
- miniCUDA124/include/crt/device_double_functions.h +1192 -0
- miniCUDA124/include/crt/device_double_functions.hpp +197 -0
- miniCUDA124/include/crt/device_functions.h +0 -0
- miniCUDA124/include/crt/device_functions.hpp +1197 -0
- miniCUDA124/include/crt/func_macro.h +57 -0
- miniCUDA124/include/crt/host_config.h +310 -0
- miniCUDA124/include/crt/host_defines.h +280 -0
- miniCUDA124/include/crt/host_runtime.h +306 -0
- miniCUDA124/include/crt/math_functions.h +0 -0
- miniCUDA124/include/crt/math_functions.hpp +0 -0
- miniCUDA124/include/crt/mma.h +754 -0
- miniCUDA124/include/crt/mma.hpp +1128 -0
- miniCUDA124/include/crt/nvfunctional +621 -0
- miniCUDA124/include/crt/sm_70_rt.h +139 -0
- miniCUDA124/include/crt/sm_70_rt.hpp +192 -0
- miniCUDA124/include/crt/sm_80_rt.h +164 -0
- miniCUDA124/include/crt/sm_80_rt.hpp +148 -0
- miniCUDA124/include/crt/sm_90_rt.h +282 -0
- miniCUDA124/include/crt/sm_90_rt.hpp +248 -0
- miniCUDA124/include/crt/storage_class.h +142 -0
- miniCUDA124/include/cub/config.cuh +51 -0
- miniCUDA124/include/cub/cub.cuh +116 -0
- miniCUDA124/include/cub/util_allocator.cuh +880 -0
- miniCUDA124/include/cub/util_arch.cuh +174 -0
- miniCUDA124/include/cub/util_compiler.cuh +92 -0
- miniCUDA124/include/cub/util_cpp_dialect.cuh +161 -0
- miniCUDA124/include/cub/util_debug.cuh +329 -0
- miniCUDA124/include/cuda/__cccl_config +16 -0
- miniCUDA124/include/cuda/atomic +16 -0
- miniCUDA124/include/cuda/barrier +285 -0
- miniCUDA124/include/cuda/functional +155 -0
- miniCUDA124/include/cuda/latch +16 -0
- miniCUDA124/include/cuda/memory_resource +632 -0
- miniCUDA124/include/cuda/pipeline +585 -0
.gitattributes
CHANGED
@@ -79,3 +79,8 @@ mingw/lib/gcc/mingw32/4.3.3/libstdc++.a filter=lfs diff=lfs merge=lfs -text
 mingw/bin/windres.exe filter=lfs diff=lfs merge=lfs -text
 mingw/libexec/gcc/mingw32/4.3.3/cc1plus.exe filter=lfs diff=lfs merge=lfs -text
 mingw/libexec/gcc/mingw32/4.3.3/cc1.exe filter=lfs diff=lfs merge=lfs -text
+miniCUDA124/bin/cuinj64_124.dll filter=lfs diff=lfs merge=lfs -text
+miniCUDA124/bin/cuobjdump.exe filter=lfs diff=lfs merge=lfs -text
+miniCUDA124/bin/nppicc64_12.dll filter=lfs diff=lfs merge=lfs -text
+miniCUDA124/bin/nppial64_12.dll filter=lfs diff=lfs merge=lfs -text
+miniCUDA124/bin/cudafe++.exe filter=lfs diff=lfs merge=lfs -text
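For context: the filter=lfs diff=lfs merge=lfs -text attributes route these binaries through Git LFS, so the repository itself stores only a three-line pointer per file (spec version line, SHA-256 oid, and byte size) while the actual payload lives in LFS storage. The five ADDED sections below are exactly those pointer files.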
miniCUDA124/bin/cudafe++.exe
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a782d452c6eff5adc291c1114e978716a8a971271f964830fabe1f5caa8d304
+size 7393280
miniCUDA124/bin/cuinj64_124.dll
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dc11bd8c0a2c8e18b454b8e2fcee19cf1f515349d4f8fd149da25e7577a94a8
+size 1513472
miniCUDA124/bin/cuobjdump.exe
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:578644883fbce889a88b5752f3c8616b846f0dac455885cea12a206989bece6a
+size 11395584
miniCUDA124/bin/nppial64_12.dll
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2350fcd7433901e365ac532b68863120276bbb82a0db5f3913be5bb8d6377518
+size 16312832
miniCUDA124/bin/nppicc64_12.dll
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b71840b2fa93878cc0a8e3c76d019436900c157427ea6d6c362ce0213fcb9e53
+size 6125056
miniCUDA124/include/CL/cl_gl.h
ADDED
@@ -0,0 +1,372 @@
+/*******************************************************************************
+ * Copyright (c) 2008-2023 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
+
+#ifndef OPENCL_CL_GL_H_
+#define OPENCL_CL_GL_H_
+
+/*
+** This header is generated from the Khronos OpenCL XML API Registry.
+*/
+
+#include <CL/cl.h>
+
+/* CL_NO_PROTOTYPES implies CL_NO_EXTENSION_PROTOTYPES: */
+#if defined(CL_NO_PROTOTYPES) && !defined(CL_NO_EXTENSION_PROTOTYPES)
+#define CL_NO_EXTENSION_PROTOTYPES
+#endif
+
+/* CL_NO_EXTENSION_PROTOTYPES implies
+   CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES and
+   CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES: */
+#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
+    !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
+#define CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES
+#endif
+#if defined(CL_NO_EXTENSION_PROTOTYPES) && \
+    !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
+#define CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***************************************************************
+* cl_khr_gl_sharing
+***************************************************************/
+#define cl_khr_gl_sharing 1
+#define CL_KHR_GL_SHARING_EXTENSION_NAME \
+    "cl_khr_gl_sharing"
+
+typedef cl_uint cl_gl_context_info;
+
+/* Error codes */
+#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000
+
+/* cl_gl_context_info */
+#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006
+#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007
+
+/* Additional cl_context_properties */
+#define CL_GL_CONTEXT_KHR 0x2008
+#define CL_EGL_DISPLAY_KHR 0x2009
+#define CL_GLX_DISPLAY_KHR 0x200A
+#define CL_WGL_HDC_KHR 0x200B
+#define CL_CGL_SHAREGROUP_KHR 0x200C
+
+typedef cl_uint cl_gl_object_type;
+typedef cl_uint cl_gl_texture_info;
+typedef cl_uint cl_gl_platform_info;
+
+/* cl_gl_object_type */
+#define CL_GL_OBJECT_BUFFER 0x2000
+#define CL_GL_OBJECT_TEXTURE2D 0x2001
+#define CL_GL_OBJECT_TEXTURE3D 0x2002
+#define CL_GL_OBJECT_RENDERBUFFER 0x2003
+
+#if defined(CL_VERSION_1_2)
+/* cl_gl_object_type */
+#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E
+#define CL_GL_OBJECT_TEXTURE1D 0x200F
+#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010
+#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011
+
+#endif /* defined(CL_VERSION_1_2) */
+
+/* cl_gl_texture_info */
+#define CL_GL_TEXTURE_TARGET 0x2004
+#define CL_GL_MIPMAP_LEVEL 0x2005
+
+
+typedef cl_int (CL_API_CALL *
+clGetGLContextInfoKHR_fn)(
+    const cl_context_properties* properties,
+    cl_gl_context_info param_name,
+    size_t param_value_size,
+    void* param_value,
+    size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0;
+
+typedef cl_mem (CL_API_CALL *
+clCreateFromGLBuffer_fn)(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_GLuint bufobj,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0;
+
+#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetGLContextInfoKHR(
+    const cl_context_properties* properties,
+    cl_gl_context_info param_name,
+    size_t param_value_size,
+    void* param_value,
+    size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateFromGLBuffer(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_GLuint bufobj,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0;
+
+#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
+
+#if defined(CL_VERSION_1_2)
+
+typedef cl_mem (CL_API_CALL *
+clCreateFromGLTexture_fn)(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_GLenum target,
+    cl_GLint miplevel,
+    cl_GLuint texture,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2;
+
+#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
+
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateFromGLTexture(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_GLenum target,
+    cl_GLint miplevel,
+    cl_GLuint texture,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_2;
+
+#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
+
+#endif /* defined(CL_VERSION_1_2) */
+
+
+typedef cl_mem (CL_API_CALL *
+clCreateFromGLRenderbuffer_fn)(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_GLuint renderbuffer,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0;
+
+typedef cl_int (CL_API_CALL *
+clGetGLObjectInfo_fn)(
+    cl_mem memobj,
+    cl_gl_object_type* gl_object_type,
+    cl_GLuint* gl_object_name) CL_API_SUFFIX__VERSION_1_0;
+
+typedef cl_int (CL_API_CALL *
+clGetGLTextureInfo_fn)(
+    cl_mem memobj,
+    cl_gl_texture_info param_name,
+    size_t param_value_size,
+    void* param_value,
+    size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0;
+
+typedef cl_int (CL_API_CALL *
+clEnqueueAcquireGLObjects_fn)(
+    cl_command_queue command_queue,
+    cl_uint num_objects,
+    const cl_mem* mem_objects,
+    cl_uint num_events_in_wait_list,
+    const cl_event* event_wait_list,
+    cl_event* event) CL_API_SUFFIX__VERSION_1_0;
+
+typedef cl_int (CL_API_CALL *
+clEnqueueReleaseGLObjects_fn)(
+    cl_command_queue command_queue,
+    cl_uint num_objects,
+    const cl_mem* mem_objects,
+    cl_uint num_events_in_wait_list,
+    const cl_event* event_wait_list,
+    cl_event* event) CL_API_SUFFIX__VERSION_1_0;
+
+#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
+
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateFromGLRenderbuffer(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_GLuint renderbuffer,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetGLObjectInfo(
+    cl_mem memobj,
+    cl_gl_object_type* gl_object_type,
+    cl_GLuint* gl_object_name) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetGLTextureInfo(
+    cl_mem memobj,
+    cl_gl_texture_info param_name,
+    size_t param_value_size,
+    void* param_value,
+    size_t* param_value_size_ret) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueAcquireGLObjects(
+    cl_command_queue command_queue,
+    cl_uint num_objects,
+    const cl_mem* mem_objects,
+    cl_uint num_events_in_wait_list,
+    const cl_event* event_wait_list,
+    cl_event* event) CL_API_SUFFIX__VERSION_1_0;
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clEnqueueReleaseGLObjects(
+    cl_command_queue command_queue,
+    cl_uint num_objects,
+    const cl_mem* mem_objects,
+    cl_uint num_events_in_wait_list,
+    const cl_event* event_wait_list,
+    cl_event* event) CL_API_SUFFIX__VERSION_1_0;
+
+#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
+
+/* OpenCL 1.0 APIs that were deprecated in OpenCL 1.2 */
+
+typedef cl_mem (CL_API_CALL *
+clCreateFromGLTexture2D_fn)(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_GLenum target,
+    cl_GLint miplevel,
+    cl_GLuint texture,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED;
+
+typedef cl_mem (CL_API_CALL *
+clCreateFromGLTexture3D_fn)(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_GLenum target,
+    cl_GLint miplevel,
+    cl_GLuint texture,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED;
+
+#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
+
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateFromGLTexture2D(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_GLenum target,
+    cl_GLint miplevel,
+    cl_GLuint texture,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED;
+
+extern CL_API_ENTRY cl_mem CL_API_CALL
+clCreateFromGLTexture3D(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_GLenum target,
+    cl_GLint miplevel,
+    cl_GLuint texture,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED;
+
+#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
+
+/***************************************************************
+* cl_khr_gl_event
+***************************************************************/
+#define cl_khr_gl_event 1
+#define CL_KHR_GL_EVENT_EXTENSION_NAME \
+    "cl_khr_gl_event"
+
+typedef struct __GLsync * cl_GLsync;
+
+/* cl_command_type */
+#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D
+
+
+typedef cl_event (CL_API_CALL *
+clCreateEventFromGLsyncKHR_fn)(
+    cl_context context,
+    cl_GLsync sync,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1;
+
+#if !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES)
+
+extern CL_API_ENTRY cl_event CL_API_CALL
+clCreateEventFromGLsyncKHR(
+    cl_context context,
+    cl_GLsync sync,
+    cl_int* errcode_ret) CL_API_SUFFIX__VERSION_1_1;
+
+#endif /* !defined(CL_NO_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
+
+/***************************************************************
+* cl_khr_gl_depth_images
+***************************************************************/
+#define cl_khr_gl_depth_images 1
+#define CL_KHR_GL_DEPTH_IMAGES_EXTENSION_NAME \
+    "cl_khr_gl_depth_images"
+
+#if !defined(CL_VERSION_1_2)
+/* cl_channel_order - defined in CL.h for OpenCL 1.2 and newer */
+#define CL_DEPTH_STENCIL 0x10BE
+
+#endif /* !defined(CL_VERSION_1_2) */
+
+#if !defined(CL_VERSION_1_2)
+/* cl_channel_type - defined in CL.h for OpenCL 1.2 and newer */
+#define CL_UNORM_INT24 0x10DF
+
+#endif /* !defined(CL_VERSION_1_2) */
+
+/***************************************************************
+* cl_khr_gl_msaa_sharing
+***************************************************************/
+#define cl_khr_gl_msaa_sharing 1
+#define CL_KHR_GL_MSAA_SHARING_EXTENSION_NAME \
+    "cl_khr_gl_msaa_sharing"
+
+/* cl_gl_texture_info */
+#define CL_GL_NUM_SAMPLES 0x2012
+
+/***************************************************************
+* cl_intel_sharing_format_query_gl
+***************************************************************/
+#define cl_intel_sharing_format_query_gl 1
+#define CL_INTEL_SHARING_FORMAT_QUERY_GL_EXTENSION_NAME \
+    "cl_intel_sharing_format_query_gl"
+
+/* when cl_khr_gl_sharing is supported */
+
+typedef cl_int (CL_API_CALL *
+clGetSupportedGLTextureFormatsINTEL_fn)(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_mem_object_type image_type,
+    cl_uint num_entries,
+    cl_GLenum* gl_formats,
+    cl_uint* num_texture_formats) ;
+
+#if !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES)
+
+extern CL_API_ENTRY cl_int CL_API_CALL
+clGetSupportedGLTextureFormatsINTEL(
+    cl_context context,
+    cl_mem_flags flags,
+    cl_mem_object_type image_type,
+    cl_uint num_entries,
+    cl_GLenum* gl_formats,
+    cl_uint* num_texture_formats) ;
+
+#endif /* !defined(CL_NO_NON_ICD_DISPATCH_EXTENSION_PROTOTYPES) */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* OPENCL_CL_GL_H_ */
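To make the declarations above concrete, here is a minimal usage sketch (illustrative only, not part of the commit): it assumes a cl_context created against a live GL context via the CL_GL_CONTEXT_KHR-style properties defined above, plus an existing command queue and GL buffer id; error handling is abbreviated, and the helper name process_shared_vbo is hypothetical.

/* Minimal cl_khr_gl_sharing sketch. Assumes `ctx` was created with
 * CL_GL_CONTEXT_KHR et al. and that the GL buffer `vbo` already exists. */
#include <CL/cl_gl.h>

static cl_int process_shared_vbo(cl_context ctx, cl_command_queue q, cl_GLuint vbo)
{
    cl_int err = CL_SUCCESS;

    /* Wrap the existing GL buffer object as an OpenCL memory object. */
    cl_mem buf = clCreateFromGLBuffer(ctx, CL_MEM_READ_WRITE, vbo, &err);
    if (err != CL_SUCCESS)
        return err;

    /* CL must acquire GL objects before enqueuing work that touches them. */
    err = clEnqueueAcquireGLObjects(q, 1, &buf, 0, NULL, NULL);
    if (err == CL_SUCCESS) {
        /* ... enqueue kernels that read/write `buf` here ... */
        err = clEnqueueReleaseGLObjects(q, 1, &buf, 0, NULL, NULL);
    }

    clReleaseMemObject(buf);
    return err;
}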
miniCUDA124/include/CL/cl_gl_ext.h
ADDED
@@ -0,0 +1,18 @@
+/*******************************************************************************
+ * Copyright (c) 2008-2021 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
+
+#include <CL/cl_gl.h>
+#pragma message("The extensions in cl_gl_ext.h have been moved into cl_gl.h. Please include cl_gl.h directly.")
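This 18-line shim exists purely for source compatibility: the extension declarations formerly in cl_gl_ext.h now live in cl_gl.h, so including the old header just pulls in the new one and emits the pragma nudging callers to update their include.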
miniCUDA124/include/CL/cl_platform.h
ADDED
@@ -0,0 +1,1478 @@
+/*******************************************************************************
+ * Copyright (c) 2008-2020 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
+
+#ifndef __CL_PLATFORM_H
+#define __CL_PLATFORM_H
+
+
+#ifndef __CL_VERSION_H
+#define __CL_VERSION_H
+
+/* Detect which version to target */
+#if !defined(CL_TARGET_OPENCL_VERSION)
+#pragma message("cl_version.h: CL_TARGET_OPENCL_VERSION is not defined. Defaulting to 300 (OpenCL 3.0)")
+#define CL_TARGET_OPENCL_VERSION 300
+#endif
+#if CL_TARGET_OPENCL_VERSION != 100 && \
+    CL_TARGET_OPENCL_VERSION != 110 && \
+    CL_TARGET_OPENCL_VERSION != 120 && \
+    CL_TARGET_OPENCL_VERSION != 200 && \
+    CL_TARGET_OPENCL_VERSION != 210 && \
+    CL_TARGET_OPENCL_VERSION != 220 && \
+    CL_TARGET_OPENCL_VERSION != 300
+#pragma message("cl_version: CL_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120, 200, 210, 220, 300). Defaulting to 300 (OpenCL 3.0)")
+#undef CL_TARGET_OPENCL_VERSION
+#define CL_TARGET_OPENCL_VERSION 300
+#endif
+
+
+/* OpenCL Version */
+#if CL_TARGET_OPENCL_VERSION >= 300 && !defined(CL_VERSION_3_0)
+#define CL_VERSION_3_0 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 220 && !defined(CL_VERSION_2_2)
+#define CL_VERSION_2_2 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 210 && !defined(CL_VERSION_2_1)
+#define CL_VERSION_2_1 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 200 && !defined(CL_VERSION_2_0)
+#define CL_VERSION_2_0 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 120 && !defined(CL_VERSION_1_2)
+#define CL_VERSION_1_2 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 110 && !defined(CL_VERSION_1_1)
+#define CL_VERSION_1_1 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 100 && !defined(CL_VERSION_1_0)
+#define CL_VERSION_1_0 1
+#endif
+
+/* Allow deprecated APIs for older OpenCL versions. */
+#if CL_TARGET_OPENCL_VERSION <= 220 && !defined(CL_USE_DEPRECATED_OPENCL_2_2_APIS)
+#define CL_USE_DEPRECATED_OPENCL_2_2_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 210 && !defined(CL_USE_DEPRECATED_OPENCL_2_1_APIS)
+#define CL_USE_DEPRECATED_OPENCL_2_1_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS)
+#define CL_USE_DEPRECATED_OPENCL_2_0_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
+#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+#define CL_USE_DEPRECATED_OPENCL_1_1_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS)
+#define CL_USE_DEPRECATED_OPENCL_1_0_APIS
+#endif
+
+#endif /* __CL_VERSION_H */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(_WIN32)
+#if !defined(CL_API_ENTRY)
+#define CL_API_ENTRY
+#endif
+#if !defined(CL_API_CALL)
+#define CL_API_CALL __stdcall
+#endif
+#if !defined(CL_CALLBACK)
+#define CL_CALLBACK __stdcall
+#endif
+#else
+#if !defined(CL_API_ENTRY)
+#define CL_API_ENTRY
+#endif
+#if !defined(CL_API_CALL)
+#define CL_API_CALL
+#endif
+#if !defined(CL_CALLBACK)
+#define CL_CALLBACK
+#endif
+#endif
+
+/*
+ * Deprecation flags refer to the last version of the header in which the
+ * feature was not deprecated.
+ *
+ * E.g. VERSION_1_1_DEPRECATED means the feature is present in 1.1 without
+ * deprecation but is deprecated in versions later than 1.1.
+ */
+
+#ifndef CL_API_SUFFIX_USER
+#define CL_API_SUFFIX_USER
+#endif
+
+#ifndef CL_API_PREFIX_USER
+#define CL_API_PREFIX_USER
+#endif
+
+#define CL_API_SUFFIX_COMMON CL_API_SUFFIX_USER
+#define CL_API_PREFIX_COMMON CL_API_PREFIX_USER
+
+#define CL_API_SUFFIX__VERSION_1_0 CL_API_SUFFIX_COMMON
+#define CL_API_SUFFIX__VERSION_1_1 CL_API_SUFFIX_COMMON
+#define CL_API_SUFFIX__VERSION_1_2 CL_API_SUFFIX_COMMON
+#define CL_API_SUFFIX__VERSION_2_0 CL_API_SUFFIX_COMMON
+#define CL_API_SUFFIX__VERSION_2_1 CL_API_SUFFIX_COMMON
+#define CL_API_SUFFIX__VERSION_2_2 CL_API_SUFFIX_COMMON
+#define CL_API_SUFFIX__VERSION_3_0 CL_API_SUFFIX_COMMON
+#define CL_API_SUFFIX__EXPERIMENTAL CL_API_SUFFIX_COMMON
+
+
+#ifdef __GNUC__
+#define CL_API_SUFFIX_DEPRECATED __attribute__((deprecated))
+#define CL_API_PREFIX_DEPRECATED
+#elif defined(_WIN32)
+#define CL_API_SUFFIX_DEPRECATED
+#define CL_API_PREFIX_DEPRECATED __declspec(deprecated)
+#else
+#define CL_API_SUFFIX_DEPRECATED
+#define CL_API_PREFIX_DEPRECATED
+#endif
+
+#ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS
+#define CL_API_SUFFIX__VERSION_1_0_DEPRECATED CL_API_SUFFIX_COMMON
+#define CL_API_PREFIX__VERSION_1_0_DEPRECATED CL_API_PREFIX_COMMON
+#else
+#define CL_API_SUFFIX__VERSION_1_0_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED
+#define CL_API_PREFIX__VERSION_1_0_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED
+#endif
+
+#ifdef CL_USE_DEPRECATED_OPENCL_1_1_APIS
+#define CL_API_SUFFIX__VERSION_1_1_DEPRECATED CL_API_SUFFIX_COMMON
+#define CL_API_PREFIX__VERSION_1_1_DEPRECATED CL_API_PREFIX_COMMON
+#else
+#define CL_API_SUFFIX__VERSION_1_1_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED
+#define CL_API_PREFIX__VERSION_1_1_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED
+#endif
+
+#ifdef CL_USE_DEPRECATED_OPENCL_1_2_APIS
+#define CL_API_SUFFIX__VERSION_1_2_DEPRECATED CL_API_SUFFIX_COMMON
+#define CL_API_PREFIX__VERSION_1_2_DEPRECATED CL_API_PREFIX_COMMON
+#else
+#define CL_API_SUFFIX__VERSION_1_2_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED
+#define CL_API_PREFIX__VERSION_1_2_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED
+#endif
+
+#ifdef CL_USE_DEPRECATED_OPENCL_2_0_APIS
+#define CL_API_SUFFIX__VERSION_2_0_DEPRECATED CL_API_SUFFIX_COMMON
+#define CL_API_PREFIX__VERSION_2_0_DEPRECATED CL_API_PREFIX_COMMON
+#else
+#define CL_API_SUFFIX__VERSION_2_0_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED
+#define CL_API_PREFIX__VERSION_2_0_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED
+#endif
+
+#ifdef CL_USE_DEPRECATED_OPENCL_2_1_APIS
+#define CL_API_SUFFIX__VERSION_2_1_DEPRECATED CL_API_SUFFIX_COMMON
+#define CL_API_PREFIX__VERSION_2_1_DEPRECATED CL_API_PREFIX_COMMON
+#else
+#define CL_API_SUFFIX__VERSION_2_1_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED
+#define CL_API_PREFIX__VERSION_2_1_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED
+#endif
+
+#ifdef CL_USE_DEPRECATED_OPENCL_2_2_APIS
+#define CL_API_SUFFIX__VERSION_2_2_DEPRECATED CL_API_SUFFIX_COMMON
+#define CL_API_PREFIX__VERSION_2_2_DEPRECATED CL_API_PREFIX_COMMON
+#else
+#define CL_API_SUFFIX__VERSION_2_2_DEPRECATED CL_API_SUFFIX_COMMON CL_API_SUFFIX_DEPRECATED
+#define CL_API_PREFIX__VERSION_2_2_DEPRECATED CL_API_PREFIX_COMMON CL_API_PREFIX_DEPRECATED
+#endif
+
+#if (defined (_WIN32) && defined(_MSC_VER))
+
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wlanguage-extension-token"
+#endif
+
+/* intptr_t is used in cl.h and provided by stddef.h in Visual C++, but not in clang */
+/* stdint.h was missing before Visual Studio 2010, include it for later versions and for clang */
+#if defined(__clang__) || _MSC_VER >= 1600
+#include <stdint.h>
+#endif
+
+/* scalar types */
+typedef signed __int8 cl_char;
+typedef unsigned __int8 cl_uchar;
+typedef signed __int16 cl_short;
+typedef unsigned __int16 cl_ushort;
+typedef signed __int32 cl_int;
+typedef unsigned __int32 cl_uint;
+typedef signed __int64 cl_long;
+typedef unsigned __int64 cl_ulong;
+
+typedef unsigned __int16 cl_half;
+typedef float cl_float;
+typedef double cl_double;
+
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
+/* Macro names and corresponding values defined by OpenCL */
+#define CL_CHAR_BIT 8
+#define CL_SCHAR_MAX 127
+#define CL_SCHAR_MIN (-127-1)
+#define CL_CHAR_MAX CL_SCHAR_MAX
+#define CL_CHAR_MIN CL_SCHAR_MIN
+#define CL_UCHAR_MAX 255
+#define CL_SHRT_MAX 32767
+#define CL_SHRT_MIN (-32767-1)
+#define CL_USHRT_MAX 65535
+#define CL_INT_MAX 2147483647
+#define CL_INT_MIN (-2147483647-1)
+#define CL_UINT_MAX 0xffffffffU
+#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL)
+#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL)
+#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL)
+
+#define CL_FLT_DIG 6
+#define CL_FLT_MANT_DIG 24
+#define CL_FLT_MAX_10_EXP +38
+#define CL_FLT_MAX_EXP +128
+#define CL_FLT_MIN_10_EXP -37
+#define CL_FLT_MIN_EXP -125
+#define CL_FLT_RADIX 2
+#define CL_FLT_MAX 340282346638528859811704183484516925440.0f
+#define CL_FLT_MIN 1.175494350822287507969e-38f
+#define CL_FLT_EPSILON 1.1920928955078125e-7f
+
+#define CL_HALF_DIG 3
+#define CL_HALF_MANT_DIG 11
+#define CL_HALF_MAX_10_EXP +4
+#define CL_HALF_MAX_EXP +16
+#define CL_HALF_MIN_10_EXP -4
+#define CL_HALF_MIN_EXP -13
+#define CL_HALF_RADIX 2
+#define CL_HALF_MAX 65504.0f
+#define CL_HALF_MIN 6.103515625e-05f
+#define CL_HALF_EPSILON 9.765625e-04f
+
+#define CL_DBL_DIG 15
+#define CL_DBL_MANT_DIG 53
+#define CL_DBL_MAX_10_EXP +308
+#define CL_DBL_MAX_EXP +1024
+#define CL_DBL_MIN_10_EXP -307
+#define CL_DBL_MIN_EXP -1021
+#define CL_DBL_RADIX 2
+#define CL_DBL_MAX 1.7976931348623158e+308
+#define CL_DBL_MIN 2.225073858507201383090e-308
+#define CL_DBL_EPSILON 2.220446049250313080847e-16
+
+#define CL_M_E 2.7182818284590452354
+#define CL_M_LOG2E 1.4426950408889634074
+#define CL_M_LOG10E 0.43429448190325182765
+#define CL_M_LN2 0.69314718055994530942
+#define CL_M_LN10 2.30258509299404568402
+#define CL_M_PI 3.14159265358979323846
+#define CL_M_PI_2 1.57079632679489661923
+#define CL_M_PI_4 0.78539816339744830962
+#define CL_M_1_PI 0.31830988618379067154
+#define CL_M_2_PI 0.63661977236758134308
+#define CL_M_2_SQRTPI 1.12837916709551257390
+#define CL_M_SQRT2 1.41421356237309504880
+#define CL_M_SQRT1_2 0.70710678118654752440
+
+#define CL_M_E_F 2.718281828f
+#define CL_M_LOG2E_F 1.442695041f
+#define CL_M_LOG10E_F 0.434294482f
+#define CL_M_LN2_F 0.693147181f
+#define CL_M_LN10_F 2.302585093f
+#define CL_M_PI_F 3.141592654f
+#define CL_M_PI_2_F 1.570796327f
+#define CL_M_PI_4_F 0.785398163f
+#define CL_M_1_PI_F 0.318309886f
+#define CL_M_2_PI_F 0.636619772f
+#define CL_M_2_SQRTPI_F 1.128379167f
+#define CL_M_SQRT2_F 1.414213562f
+#define CL_M_SQRT1_2_F 0.707106781f
+
+#define CL_NAN (CL_INFINITY - CL_INFINITY)
+#define CL_HUGE_VALF ((cl_float) 1e50)
+#define CL_HUGE_VAL ((cl_double) 1e500)
+#define CL_MAXFLOAT CL_FLT_MAX
+#define CL_INFINITY CL_HUGE_VALF
+
+#else
+
+#include <stdint.h>
+
+/* scalar types */
+typedef int8_t cl_char;
+typedef uint8_t cl_uchar;
+typedef int16_t cl_short;
+typedef uint16_t cl_ushort;
+typedef int32_t cl_int;
+typedef uint32_t cl_uint;
+typedef int64_t cl_long;
+typedef uint64_t cl_ulong;
+
+typedef uint16_t cl_half;
+typedef float cl_float;
+typedef double cl_double;
+
+/* Macro names and corresponding values defined by OpenCL */
+#define CL_CHAR_BIT 8
+#define CL_SCHAR_MAX 127
+#define CL_SCHAR_MIN (-127-1)
+#define CL_CHAR_MAX CL_SCHAR_MAX
+#define CL_CHAR_MIN CL_SCHAR_MIN
+#define CL_UCHAR_MAX 255
+#define CL_SHRT_MAX 32767
+#define CL_SHRT_MIN (-32767-1)
+#define CL_USHRT_MAX 65535
+#define CL_INT_MAX 2147483647
+#define CL_INT_MIN (-2147483647-1)
+#define CL_UINT_MAX 0xffffffffU
+#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL)
+#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL)
+#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL)
+
+#define CL_FLT_DIG 6
+#define CL_FLT_MANT_DIG 24
+#define CL_FLT_MAX_10_EXP +38
+#define CL_FLT_MAX_EXP +128
+#define CL_FLT_MIN_10_EXP -37
+#define CL_FLT_MIN_EXP -125
+#define CL_FLT_RADIX 2
+#define CL_FLT_MAX 340282346638528859811704183484516925440.0f
+#define CL_FLT_MIN 1.175494350822287507969e-38f
+#define CL_FLT_EPSILON 1.1920928955078125e-7f
+
+#define CL_HALF_DIG 3
+#define CL_HALF_MANT_DIG 11
+#define CL_HALF_MAX_10_EXP +4
+#define CL_HALF_MAX_EXP +16
+#define CL_HALF_MIN_10_EXP -4
+#define CL_HALF_MIN_EXP -13
+#define CL_HALF_RADIX 2
+#define CL_HALF_MAX 65504.0f
+#define CL_HALF_MIN 6.103515625e-05f
+#define CL_HALF_EPSILON 9.765625e-04f
+
+#define CL_DBL_DIG 15
+#define CL_DBL_MANT_DIG 53
+#define CL_DBL_MAX_10_EXP +308
+#define CL_DBL_MAX_EXP +1024
+#define CL_DBL_MIN_10_EXP -307
+#define CL_DBL_MIN_EXP -1021
+#define CL_DBL_RADIX 2
+#define CL_DBL_MAX 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.0
+#define CL_DBL_MIN 2.225073858507201383090e-308
+#define CL_DBL_EPSILON 2.220446049250313080847e-16
+
+#define CL_M_E 2.7182818284590452354
+#define CL_M_LOG2E 1.4426950408889634074
+#define CL_M_LOG10E 0.43429448190325182765
+#define CL_M_LN2 0.69314718055994530942
+#define CL_M_LN10 2.30258509299404568402
+#define CL_M_PI 3.14159265358979323846
+#define CL_M_PI_2 1.57079632679489661923
+#define CL_M_PI_4 0.78539816339744830962
+#define CL_M_1_PI 0.31830988618379067154
+#define CL_M_2_PI 0.63661977236758134308
+#define CL_M_2_SQRTPI 1.12837916709551257390
+#define CL_M_SQRT2 1.41421356237309504880
+#define CL_M_SQRT1_2 0.70710678118654752440
+
+#define CL_M_E_F 2.718281828f
+#define CL_M_LOG2E_F 1.442695041f
+#define CL_M_LOG10E_F 0.434294482f
+#define CL_M_LN2_F 0.693147181f
+#define CL_M_LN10_F 2.302585093f
+#define CL_M_PI_F 3.141592654f
+#define CL_M_PI_2_F 1.570796327f
+#define CL_M_PI_4_F 0.785398163f
+#define CL_M_1_PI_F 0.318309886f
+#define CL_M_2_PI_F 0.636619772f
+#define CL_M_2_SQRTPI_F 1.128379167f
+#define CL_M_SQRT2_F 1.414213562f
+#define CL_M_SQRT1_2_F 0.707106781f
+
+#if defined( __GNUC__ )
+#define CL_HUGE_VALF __builtin_huge_valf()
+#define CL_HUGE_VAL __builtin_huge_val()
+#define CL_NAN __builtin_nanf( "" )
+#else
+#define CL_HUGE_VALF ((cl_float) 1e50)
+#define CL_HUGE_VAL ((cl_double) 1e500)
+float nanf( const char * );
+#define CL_NAN nanf( "" )
+#endif
+#define CL_MAXFLOAT CL_FLT_MAX
+#define CL_INFINITY CL_HUGE_VALF
+
+#endif
+
+#include <stddef.h>
+
+/* Mirror types to GL types. Mirror types allow us to avoid deciding which 87s to load based on whether we are using GL or GLES here. */
+typedef unsigned int cl_GLuint;
+typedef int cl_GLint;
+typedef unsigned int cl_GLenum;
+
+/*
+ * Vector types
+ *
+ * Note: OpenCL requires that all types be naturally aligned.
+ * This means that vector types must be naturally aligned.
+ * For example, a vector of four floats must be aligned to
+ * a 16 byte boundary (calculated as 4 * the natural 4-byte
+ * alignment of the float). The alignment qualifiers here
+ * will only function properly if your compiler supports them
+ * and if you don't actively work to defeat them. For example,
+ * in order for a cl_float4 to be 16 byte aligned in a struct,
+ * the start of the struct must itself be 16-byte aligned.
+ *
+ * Maintaining proper alignment is the user's responsibility.
+ */
+
+/* Define basic vector types */
+#if defined( __VEC__ )
+#if !defined(__clang__)
+#include <altivec.h> /* may be omitted depending on compiler. AltiVec spec provides no way to detect whether the header is required. */
+#endif
+typedef __vector unsigned char __cl_uchar16;
+typedef __vector signed char __cl_char16;
+typedef __vector unsigned short __cl_ushort8;
+typedef __vector signed short __cl_short8;
+typedef __vector unsigned int __cl_uint4;
+typedef __vector signed int __cl_int4;
+typedef __vector float __cl_float4;
+#define __CL_UCHAR16__ 1
+#define __CL_CHAR16__ 1
+#define __CL_USHORT8__ 1
+#define __CL_SHORT8__ 1
+#define __CL_UINT4__ 1
+#define __CL_INT4__ 1
+#define __CL_FLOAT4__ 1
+#endif
+
+#if defined( __SSE__ )
+#if defined( __MINGW64__ )
+#include <intrin.h>
+#else
+#include <xmmintrin.h>
+#endif
+#if defined( __GNUC__ )
+typedef float __cl_float4 __attribute__((vector_size(16)));
+#else
+typedef __m128 __cl_float4;
+#endif
+#define __CL_FLOAT4__ 1
+#endif
+
+#if defined( __SSE2__ )
+#if defined( __MINGW64__ )
+#include <intrin.h>
+#else
+#include <emmintrin.h>
+#endif
+#if defined( __GNUC__ )
+typedef cl_uchar __cl_uchar16 __attribute__((vector_size(16)));
+typedef cl_char __cl_char16 __attribute__((vector_size(16)));
+typedef cl_ushort __cl_ushort8 __attribute__((vector_size(16)));
+typedef cl_short __cl_short8 __attribute__((vector_size(16)));
+typedef cl_uint __cl_uint4 __attribute__((vector_size(16)));
+typedef cl_int __cl_int4 __attribute__((vector_size(16)));
+typedef cl_ulong __cl_ulong2 __attribute__((vector_size(16)));
+typedef cl_long __cl_long2 __attribute__((vector_size(16)));
+typedef cl_double __cl_double2 __attribute__((vector_size(16)));
+#else
+typedef __m128i __cl_uchar16;
+typedef __m128i __cl_char16;
+typedef __m128i __cl_ushort8;
+typedef __m128i __cl_short8;
+typedef __m128i __cl_uint4;
+typedef __m128i __cl_int4;
+typedef __m128i __cl_ulong2;
+typedef __m128i __cl_long2;
+typedef __m128d __cl_double2;
+#endif
+#define __CL_UCHAR16__ 1
+#define __CL_CHAR16__ 1
+#define __CL_USHORT8__ 1
+#define __CL_SHORT8__ 1
+#define __CL_INT4__ 1
+#define __CL_UINT4__ 1
+#define __CL_ULONG2__ 1
+#define __CL_LONG2__ 1
+#define __CL_DOUBLE2__ 1
+#endif
+
+#if defined( __MMX__ )
+#include <mmintrin.h>
+#if defined( __GNUC__ )
+typedef cl_uchar __cl_uchar8 __attribute__((vector_size(8)));
+typedef cl_char __cl_char8 __attribute__((vector_size(8)));
+typedef cl_ushort __cl_ushort4 __attribute__((vector_size(8)));
+typedef cl_short __cl_short4 __attribute__((vector_size(8)));
+typedef cl_uint __cl_uint2 __attribute__((vector_size(8)));
+typedef cl_int __cl_int2 __attribute__((vector_size(8)));
+typedef cl_ulong __cl_ulong1 __attribute__((vector_size(8)));
+typedef cl_long __cl_long1 __attribute__((vector_size(8)));
+typedef cl_float __cl_float2 __attribute__((vector_size(8)));
+#else
+typedef __m64 __cl_uchar8;
+typedef __m64 __cl_char8;
+typedef __m64 __cl_ushort4;
+typedef __m64 __cl_short4;
+typedef __m64 __cl_uint2;
+typedef __m64 __cl_int2;
+typedef __m64 __cl_ulong1;
+typedef __m64 __cl_long1;
+typedef __m64 __cl_float2;
+#endif
+#define __CL_UCHAR8__ 1
+#define __CL_CHAR8__ 1
+#define __CL_USHORT4__ 1
+#define __CL_SHORT4__ 1
+#define __CL_INT2__ 1
+#define __CL_UINT2__ 1
+#define __CL_ULONG1__ 1
+#define __CL_LONG1__ 1
+#define __CL_FLOAT2__ 1
+#endif
+
+#if defined( __AVX__ )
+#if defined( __MINGW64__ )
+#include <intrin.h>
+#else
+#include <immintrin.h>
+#endif
+#if defined( __GNUC__ )
+typedef cl_float __cl_float8 __attribute__((vector_size(32)));
+typedef cl_double __cl_double4 __attribute__((vector_size(32)));
+#else
+typedef __m256 __cl_float8;
+typedef __m256d __cl_double4;
+#endif
+#define __CL_FLOAT8__ 1
+#define __CL_DOUBLE4__ 1
+#endif
+
+/* Define capabilities for anonymous struct members. */
+#if !defined(__cplusplus) && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+#define __CL_HAS_ANON_STRUCT__ 1
+#define __CL_ANON_STRUCT__
+#elif defined(_WIN32) && defined(_MSC_VER) && !defined(__STDC__)
+#define __CL_HAS_ANON_STRUCT__ 1
+#define __CL_ANON_STRUCT__
+#elif defined(__GNUC__) && ! defined(__STRICT_ANSI__)
+#define __CL_HAS_ANON_STRUCT__ 1
+#define __CL_ANON_STRUCT__ __extension__
+#elif defined(__clang__)
+#define __CL_HAS_ANON_STRUCT__ 1
+#define __CL_ANON_STRUCT__ __extension__
+#else
+#define __CL_HAS_ANON_STRUCT__ 0
+#define __CL_ANON_STRUCT__
+#endif
+
+#if defined(_WIN32) && defined(_MSC_VER) && __CL_HAS_ANON_STRUCT__
+/* Disable warning C4201: nonstandard extension used : nameless struct/union */
+#pragma warning( push )
+#pragma warning( disable : 4201 )
+#endif
+
+/* Define alignment keys */
+#if defined( __GNUC__ ) || defined(__INTEGRITY)
+#define CL_ALIGNED(_x) __attribute__ ((aligned(_x)))
+#elif defined( _WIN32) && (_MSC_VER)
+/* Alignment keys neutered on windows because MSVC can't swallow function arguments with alignment requirements */
+/* http://msdn.microsoft.com/en-us/library/373ak2y1%28VS.71%29.aspx */
+/* #include <crtdefs.h> */
+/* #define CL_ALIGNED(_x) _CRT_ALIGN(_x) */
+#define CL_ALIGNED(_x)
+#else
+#warning Need to implement some method to align data here
+#define CL_ALIGNED(_x)
+#endif
+
+/* Indicate whether .xyzw, .s0123 and .hi.lo are supported */
+#if __CL_HAS_ANON_STRUCT__
+/* .xyzw and .s0123...{f|F} are supported */
+#define CL_HAS_NAMED_VECTOR_FIELDS 1
+/* .hi and .lo are supported */
+#define CL_HAS_HI_LO_VECTOR_FIELDS 1
+#endif
+
+/* Define cl_vector types */
+
+/* ---- cl_charn ---- */
+typedef union
+{
+    cl_char CL_ALIGNED(2) s[2];
+#if __CL_HAS_ANON_STRUCT__
+    __CL_ANON_STRUCT__ struct{ cl_char x, y; };
+    __CL_ANON_STRUCT__ struct{ cl_char s0, s1; };
+    __CL_ANON_STRUCT__ struct{ cl_char lo, hi; };
+#endif
+#if defined( __CL_CHAR2__)
+    __cl_char2 v2;
+#endif
+}cl_char2;
+
+typedef union
+{
+    cl_char CL_ALIGNED(4) s[4];
+#if __CL_HAS_ANON_STRUCT__
+    __CL_ANON_STRUCT__ struct{ cl_char x, y, z, w; };
+    __CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3; };
+    __CL_ANON_STRUCT__ struct{ cl_char2 lo, hi; };
+#endif
+#if defined( __CL_CHAR2__)
+    __cl_char2 v2[2];
+#endif
+#if defined( __CL_CHAR4__)
+    __cl_char4 v4;
+#endif
+}cl_char4;
+
+/* cl_char3 is identical in size, alignment and behavior to cl_char4. See section 6.1.5. */
| 591 |
+
#endif
|
| 592 |
+
|
| 593 |
+
#if defined(_WIN32) && defined(_MSC_VER) && __CL_HAS_ANON_STRUCT__
|
| 594 |
+
/* Disable warning C4201: nonstandard extension used : nameless struct/union */
|
| 595 |
+
#pragma warning( push )
|
| 596 |
+
#pragma warning( disable : 4201 )
|
| 597 |
+
#endif
|
| 598 |
+
|
| 599 |
+
/* Define alignment keys */
|
| 600 |
+
#if defined( __GNUC__ ) || defined(__INTEGRITY)
|
| 601 |
+
#define CL_ALIGNED(_x) __attribute__ ((aligned(_x)))
|
| 602 |
+
#elif defined( _WIN32) && (_MSC_VER)
|
| 603 |
+
/* Alignment keys neutered on windows because MSVC can't swallow function arguments with alignment requirements */
|
| 604 |
+
/* http://msdn.microsoft.com/en-us/library/373ak2y1%28VS.71%29.aspx */
|
| 605 |
+
/* #include <crtdefs.h> */
|
| 606 |
+
/* #define CL_ALIGNED(_x) _CRT_ALIGN(_x) */
|
| 607 |
+
#define CL_ALIGNED(_x)
|
| 608 |
+
#else
|
| 609 |
+
#warning Need to implement some method to align data here
|
| 610 |
+
#define CL_ALIGNED(_x)
|
| 611 |
+
#endif
|
| 612 |
+
|
| 613 |
+
/* Indicate whether .xyzw, .s0123 and .hi.lo are supported */
|
| 614 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 615 |
+
/* .xyzw and .s0123...{f|F} are supported */
|
| 616 |
+
#define CL_HAS_NAMED_VECTOR_FIELDS 1
|
| 617 |
+
/* .hi and .lo are supported */
|
| 618 |
+
#define CL_HAS_HI_LO_VECTOR_FIELDS 1
|
| 619 |
+
#endif
|
| 620 |
+
|
| 621 |
+
/* Define cl_vector types */
|
| 622 |
+
|
| 623 |
+
/* ---- cl_charn ---- */
|
| 624 |
+
typedef union
|
| 625 |
+
{
|
| 626 |
+
cl_char CL_ALIGNED(2) s[2];
|
| 627 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 628 |
+
__CL_ANON_STRUCT__ struct{ cl_char x, y; };
|
| 629 |
+
__CL_ANON_STRUCT__ struct{ cl_char s0, s1; };
|
| 630 |
+
__CL_ANON_STRUCT__ struct{ cl_char lo, hi; };
|
| 631 |
+
#endif
|
| 632 |
+
#if defined( __CL_CHAR2__)
|
| 633 |
+
__cl_char2 v2;
|
| 634 |
+
#endif
|
| 635 |
+
}cl_char2;
|
| 636 |
+
|
| 637 |
+
typedef union
|
| 638 |
+
{
|
| 639 |
+
cl_char CL_ALIGNED(4) s[4];
|
| 640 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 641 |
+
__CL_ANON_STRUCT__ struct{ cl_char x, y, z, w; };
|
| 642 |
+
__CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3; };
|
| 643 |
+
__CL_ANON_STRUCT__ struct{ cl_char2 lo, hi; };
|
| 644 |
+
#endif
|
| 645 |
+
#if defined( __CL_CHAR2__)
|
| 646 |
+
__cl_char2 v2[2];
|
| 647 |
+
#endif
|
| 648 |
+
#if defined( __CL_CHAR4__)
|
| 649 |
+
__cl_char4 v4;
|
| 650 |
+
#endif
|
| 651 |
+
}cl_char4;
|
| 652 |
+
|
| 653 |
+
/* cl_char3 is identical in size, alignment and behavior to cl_char4. See section 6.1.5. */
|
| 654 |
+
typedef cl_char4 cl_char3;
|
| 655 |
+
|
| 656 |
+
typedef union
|
| 657 |
+
{
|
| 658 |
+
cl_char CL_ALIGNED(8) s[8];
|
| 659 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 660 |
+
__CL_ANON_STRUCT__ struct{ cl_char x, y, z, w; };
|
| 661 |
+
__CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7; };
|
| 662 |
+
__CL_ANON_STRUCT__ struct{ cl_char4 lo, hi; };
|
| 663 |
+
#endif
|
| 664 |
+
#if defined( __CL_CHAR2__)
|
| 665 |
+
__cl_char2 v2[4];
|
| 666 |
+
#endif
|
| 667 |
+
#if defined( __CL_CHAR4__)
|
| 668 |
+
__cl_char4 v4[2];
|
| 669 |
+
#endif
|
| 670 |
+
#if defined( __CL_CHAR8__ )
|
| 671 |
+
__cl_char8 v8;
|
| 672 |
+
#endif
|
| 673 |
+
}cl_char8;
|
| 674 |
+
|
| 675 |
+
typedef union
|
| 676 |
+
{
|
| 677 |
+
cl_char CL_ALIGNED(16) s[16];
|
| 678 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 679 |
+
__CL_ANON_STRUCT__ struct{ cl_char x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
|
| 680 |
+
__CL_ANON_STRUCT__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
|
| 681 |
+
__CL_ANON_STRUCT__ struct{ cl_char8 lo, hi; };
|
| 682 |
+
#endif
|
| 683 |
+
#if defined( __CL_CHAR2__)
|
| 684 |
+
__cl_char2 v2[8];
|
| 685 |
+
#endif
|
| 686 |
+
#if defined( __CL_CHAR4__)
|
| 687 |
+
__cl_char4 v4[4];
|
| 688 |
+
#endif
|
| 689 |
+
#if defined( __CL_CHAR8__ )
|
| 690 |
+
__cl_char8 v8[2];
|
| 691 |
+
#endif
|
| 692 |
+
#if defined( __CL_CHAR16__ )
|
| 693 |
+
__cl_char16 v16;
|
| 694 |
+
#endif
|
| 695 |
+
}cl_char16;
|
| 696 |
+
|
| 697 |
+
|
| 698 |
+
/* ---- cl_ucharn ---- */
|
| 699 |
+
typedef union
|
| 700 |
+
{
|
| 701 |
+
cl_uchar CL_ALIGNED(2) s[2];
|
| 702 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 703 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar x, y; };
|
| 704 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar s0, s1; };
|
| 705 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar lo, hi; };
|
| 706 |
+
#endif
|
| 707 |
+
#if defined( __cl_uchar2__)
|
| 708 |
+
__cl_uchar2 v2;
|
| 709 |
+
#endif
|
| 710 |
+
}cl_uchar2;
|
| 711 |
+
|
| 712 |
+
typedef union
|
| 713 |
+
{
|
| 714 |
+
cl_uchar CL_ALIGNED(4) s[4];
|
| 715 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 716 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w; };
|
| 717 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3; };
|
| 718 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar2 lo, hi; };
|
| 719 |
+
#endif
|
| 720 |
+
#if defined( __CL_UCHAR2__)
|
| 721 |
+
__cl_uchar2 v2[2];
|
| 722 |
+
#endif
|
| 723 |
+
#if defined( __CL_UCHAR4__)
|
| 724 |
+
__cl_uchar4 v4;
|
| 725 |
+
#endif
|
| 726 |
+
}cl_uchar4;
|
| 727 |
+
|
| 728 |
+
/* cl_uchar3 is identical in size, alignment and behavior to cl_uchar4. See section 6.1.5. */
|
| 729 |
+
typedef cl_uchar4 cl_uchar3;
|
| 730 |
+
|
| 731 |
+
typedef union
|
| 732 |
+
{
|
| 733 |
+
cl_uchar CL_ALIGNED(8) s[8];
|
| 734 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 735 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w; };
|
| 736 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7; };
|
| 737 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar4 lo, hi; };
|
| 738 |
+
#endif
|
| 739 |
+
#if defined( __CL_UCHAR2__)
|
| 740 |
+
__cl_uchar2 v2[4];
|
| 741 |
+
#endif
|
| 742 |
+
#if defined( __CL_UCHAR4__)
|
| 743 |
+
__cl_uchar4 v4[2];
|
| 744 |
+
#endif
|
| 745 |
+
#if defined( __CL_UCHAR8__ )
|
| 746 |
+
__cl_uchar8 v8;
|
| 747 |
+
#endif
|
| 748 |
+
}cl_uchar8;
|
| 749 |
+
|
| 750 |
+
typedef union
|
| 751 |
+
{
|
| 752 |
+
cl_uchar CL_ALIGNED(16) s[16];
|
| 753 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 754 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
|
| 755 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
|
| 756 |
+
__CL_ANON_STRUCT__ struct{ cl_uchar8 lo, hi; };
|
| 757 |
+
#endif
|
| 758 |
+
#if defined( __CL_UCHAR2__)
|
| 759 |
+
__cl_uchar2 v2[8];
|
| 760 |
+
#endif
|
| 761 |
+
#if defined( __CL_UCHAR4__)
|
| 762 |
+
__cl_uchar4 v4[4];
|
| 763 |
+
#endif
|
| 764 |
+
#if defined( __CL_UCHAR8__ )
|
| 765 |
+
__cl_uchar8 v8[2];
|
| 766 |
+
#endif
|
| 767 |
+
#if defined( __CL_UCHAR16__ )
|
| 768 |
+
__cl_uchar16 v16;
|
| 769 |
+
#endif
|
| 770 |
+
}cl_uchar16;
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
/* ---- cl_shortn ---- */
|
| 774 |
+
typedef union
|
| 775 |
+
{
|
| 776 |
+
cl_short CL_ALIGNED(4) s[2];
|
| 777 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 778 |
+
__CL_ANON_STRUCT__ struct{ cl_short x, y; };
|
| 779 |
+
__CL_ANON_STRUCT__ struct{ cl_short s0, s1; };
|
| 780 |
+
__CL_ANON_STRUCT__ struct{ cl_short lo, hi; };
|
| 781 |
+
#endif
|
| 782 |
+
#if defined( __CL_SHORT2__)
|
| 783 |
+
__cl_short2 v2;
|
| 784 |
+
#endif
|
| 785 |
+
}cl_short2;
|
| 786 |
+
|
| 787 |
+
typedef union
|
| 788 |
+
{
|
| 789 |
+
cl_short CL_ALIGNED(8) s[4];
|
| 790 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 791 |
+
__CL_ANON_STRUCT__ struct{ cl_short x, y, z, w; };
|
| 792 |
+
__CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3; };
|
| 793 |
+
__CL_ANON_STRUCT__ struct{ cl_short2 lo, hi; };
|
| 794 |
+
#endif
|
| 795 |
+
#if defined( __CL_SHORT2__)
|
| 796 |
+
__cl_short2 v2[2];
|
| 797 |
+
#endif
|
| 798 |
+
#if defined( __CL_SHORT4__)
|
| 799 |
+
__cl_short4 v4;
|
| 800 |
+
#endif
|
| 801 |
+
}cl_short4;
|
| 802 |
+
|
| 803 |
+
/* cl_short3 is identical in size, alignment and behavior to cl_short4. See section 6.1.5. */
|
| 804 |
+
typedef cl_short4 cl_short3;
|
| 805 |
+
|
| 806 |
+
typedef union
|
| 807 |
+
{
|
| 808 |
+
cl_short CL_ALIGNED(16) s[8];
|
| 809 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 810 |
+
__CL_ANON_STRUCT__ struct{ cl_short x, y, z, w; };
|
| 811 |
+
__CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7; };
|
| 812 |
+
__CL_ANON_STRUCT__ struct{ cl_short4 lo, hi; };
|
| 813 |
+
#endif
|
| 814 |
+
#if defined( __CL_SHORT2__)
|
| 815 |
+
__cl_short2 v2[4];
|
| 816 |
+
#endif
|
| 817 |
+
#if defined( __CL_SHORT4__)
|
| 818 |
+
__cl_short4 v4[2];
|
| 819 |
+
#endif
|
| 820 |
+
#if defined( __CL_SHORT8__ )
|
| 821 |
+
__cl_short8 v8;
|
| 822 |
+
#endif
|
| 823 |
+
}cl_short8;
|
| 824 |
+
|
| 825 |
+
typedef union
|
| 826 |
+
{
|
| 827 |
+
cl_short CL_ALIGNED(32) s[16];
|
| 828 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 829 |
+
__CL_ANON_STRUCT__ struct{ cl_short x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
|
| 830 |
+
__CL_ANON_STRUCT__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
|
| 831 |
+
__CL_ANON_STRUCT__ struct{ cl_short8 lo, hi; };
|
| 832 |
+
#endif
|
| 833 |
+
#if defined( __CL_SHORT2__)
|
| 834 |
+
__cl_short2 v2[8];
|
| 835 |
+
#endif
|
| 836 |
+
#if defined( __CL_SHORT4__)
|
| 837 |
+
__cl_short4 v4[4];
|
| 838 |
+
#endif
|
| 839 |
+
#if defined( __CL_SHORT8__ )
|
| 840 |
+
__cl_short8 v8[2];
|
| 841 |
+
#endif
|
| 842 |
+
#if defined( __CL_SHORT16__ )
|
| 843 |
+
__cl_short16 v16;
|
| 844 |
+
#endif
|
| 845 |
+
}cl_short16;
|
| 846 |
+
|
| 847 |
+
|
| 848 |
+
/* ---- cl_ushortn ---- */
|
| 849 |
+
typedef union
|
| 850 |
+
{
|
| 851 |
+
cl_ushort CL_ALIGNED(4) s[2];
|
| 852 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 853 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort x, y; };
|
| 854 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort s0, s1; };
|
| 855 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort lo, hi; };
|
| 856 |
+
#endif
|
| 857 |
+
#if defined( __CL_USHORT2__)
|
| 858 |
+
__cl_ushort2 v2;
|
| 859 |
+
#endif
|
| 860 |
+
}cl_ushort2;
|
| 861 |
+
|
| 862 |
+
typedef union
|
| 863 |
+
{
|
| 864 |
+
cl_ushort CL_ALIGNED(8) s[4];
|
| 865 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 866 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w; };
|
| 867 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3; };
|
| 868 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort2 lo, hi; };
|
| 869 |
+
#endif
|
| 870 |
+
#if defined( __CL_USHORT2__)
|
| 871 |
+
__cl_ushort2 v2[2];
|
| 872 |
+
#endif
|
| 873 |
+
#if defined( __CL_USHORT4__)
|
| 874 |
+
__cl_ushort4 v4;
|
| 875 |
+
#endif
|
| 876 |
+
}cl_ushort4;
|
| 877 |
+
|
| 878 |
+
/* cl_ushort3 is identical in size, alignment and behavior to cl_ushort4. See section 6.1.5. */
|
| 879 |
+
typedef cl_ushort4 cl_ushort3;
|
| 880 |
+
|
| 881 |
+
typedef union
|
| 882 |
+
{
|
| 883 |
+
cl_ushort CL_ALIGNED(16) s[8];
|
| 884 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 885 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w; };
|
| 886 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7; };
|
| 887 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort4 lo, hi; };
|
| 888 |
+
#endif
|
| 889 |
+
#if defined( __CL_USHORT2__)
|
| 890 |
+
__cl_ushort2 v2[4];
|
| 891 |
+
#endif
|
| 892 |
+
#if defined( __CL_USHORT4__)
|
| 893 |
+
__cl_ushort4 v4[2];
|
| 894 |
+
#endif
|
| 895 |
+
#if defined( __CL_USHORT8__ )
|
| 896 |
+
__cl_ushort8 v8;
|
| 897 |
+
#endif
|
| 898 |
+
}cl_ushort8;
|
| 899 |
+
|
| 900 |
+
typedef union
|
| 901 |
+
{
|
| 902 |
+
cl_ushort CL_ALIGNED(32) s[16];
|
| 903 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 904 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
|
| 905 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
|
| 906 |
+
__CL_ANON_STRUCT__ struct{ cl_ushort8 lo, hi; };
|
| 907 |
+
#endif
|
| 908 |
+
#if defined( __CL_USHORT2__)
|
| 909 |
+
__cl_ushort2 v2[8];
|
| 910 |
+
#endif
|
| 911 |
+
#if defined( __CL_USHORT4__)
|
| 912 |
+
__cl_ushort4 v4[4];
|
| 913 |
+
#endif
|
| 914 |
+
#if defined( __CL_USHORT8__ )
|
| 915 |
+
__cl_ushort8 v8[2];
|
| 916 |
+
#endif
|
| 917 |
+
#if defined( __CL_USHORT16__ )
|
| 918 |
+
__cl_ushort16 v16;
|
| 919 |
+
#endif
|
| 920 |
+
}cl_ushort16;
|
| 921 |
+
|
| 922 |
+
|
| 923 |
+
/* ---- cl_halfn ---- */
|
| 924 |
+
typedef union
|
| 925 |
+
{
|
| 926 |
+
cl_half CL_ALIGNED(4) s[2];
|
| 927 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 928 |
+
__CL_ANON_STRUCT__ struct{ cl_half x, y; };
|
| 929 |
+
__CL_ANON_STRUCT__ struct{ cl_half s0, s1; };
|
| 930 |
+
__CL_ANON_STRUCT__ struct{ cl_half lo, hi; };
|
| 931 |
+
#endif
|
| 932 |
+
#if defined( __CL_HALF2__)
|
| 933 |
+
__cl_half2 v2;
|
| 934 |
+
#endif
|
| 935 |
+
}cl_half2;
|
| 936 |
+
|
| 937 |
+
typedef union
|
| 938 |
+
{
|
| 939 |
+
cl_half CL_ALIGNED(8) s[4];
|
| 940 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 941 |
+
__CL_ANON_STRUCT__ struct{ cl_half x, y, z, w; };
|
| 942 |
+
__CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3; };
|
| 943 |
+
__CL_ANON_STRUCT__ struct{ cl_half2 lo, hi; };
|
| 944 |
+
#endif
|
| 945 |
+
#if defined( __CL_HALF2__)
|
| 946 |
+
__cl_half2 v2[2];
|
| 947 |
+
#endif
|
| 948 |
+
#if defined( __CL_HALF4__)
|
| 949 |
+
__cl_half4 v4;
|
| 950 |
+
#endif
|
| 951 |
+
}cl_half4;
|
| 952 |
+
|
| 953 |
+
/* cl_half3 is identical in size, alignment and behavior to cl_half4. See section 6.1.5. */
|
| 954 |
+
typedef cl_half4 cl_half3;
|
| 955 |
+
|
| 956 |
+
typedef union
|
| 957 |
+
{
|
| 958 |
+
cl_half CL_ALIGNED(16) s[8];
|
| 959 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 960 |
+
__CL_ANON_STRUCT__ struct{ cl_half x, y, z, w; };
|
| 961 |
+
__CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3, s4, s5, s6, s7; };
|
| 962 |
+
__CL_ANON_STRUCT__ struct{ cl_half4 lo, hi; };
|
| 963 |
+
#endif
|
| 964 |
+
#if defined( __CL_HALF2__)
|
| 965 |
+
__cl_half2 v2[4];
|
| 966 |
+
#endif
|
| 967 |
+
#if defined( __CL_HALF4__)
|
| 968 |
+
__cl_half4 v4[2];
|
| 969 |
+
#endif
|
| 970 |
+
#if defined( __CL_HALF8__ )
|
| 971 |
+
__cl_half8 v8;
|
| 972 |
+
#endif
|
| 973 |
+
}cl_half8;
|
| 974 |
+
|
| 975 |
+
typedef union
|
| 976 |
+
{
|
| 977 |
+
cl_half CL_ALIGNED(32) s[16];
|
| 978 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 979 |
+
__CL_ANON_STRUCT__ struct{ cl_half x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
|
| 980 |
+
__CL_ANON_STRUCT__ struct{ cl_half s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
|
| 981 |
+
__CL_ANON_STRUCT__ struct{ cl_half8 lo, hi; };
|
| 982 |
+
#endif
|
| 983 |
+
#if defined( __CL_HALF2__)
|
| 984 |
+
__cl_half2 v2[8];
|
| 985 |
+
#endif
|
| 986 |
+
#if defined( __CL_HALF4__)
|
| 987 |
+
__cl_half4 v4[4];
|
| 988 |
+
#endif
|
| 989 |
+
#if defined( __CL_HALF8__ )
|
| 990 |
+
__cl_half8 v8[2];
|
| 991 |
+
#endif
|
| 992 |
+
#if defined( __CL_HALF16__ )
|
| 993 |
+
__cl_half16 v16;
|
| 994 |
+
#endif
|
| 995 |
+
}cl_half16;
|
| 996 |
+
|
| 997 |
+
/* ---- cl_intn ---- */
|
| 998 |
+
typedef union
|
| 999 |
+
{
|
| 1000 |
+
cl_int CL_ALIGNED(8) s[2];
|
| 1001 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1002 |
+
__CL_ANON_STRUCT__ struct{ cl_int x, y; };
|
| 1003 |
+
__CL_ANON_STRUCT__ struct{ cl_int s0, s1; };
|
| 1004 |
+
__CL_ANON_STRUCT__ struct{ cl_int lo, hi; };
|
| 1005 |
+
#endif
|
| 1006 |
+
#if defined( __CL_INT2__)
|
| 1007 |
+
__cl_int2 v2;
|
| 1008 |
+
#endif
|
| 1009 |
+
}cl_int2;
|
| 1010 |
+
|
| 1011 |
+
typedef union
|
| 1012 |
+
{
|
| 1013 |
+
cl_int CL_ALIGNED(16) s[4];
|
| 1014 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1015 |
+
__CL_ANON_STRUCT__ struct{ cl_int x, y, z, w; };
|
| 1016 |
+
__CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3; };
|
| 1017 |
+
__CL_ANON_STRUCT__ struct{ cl_int2 lo, hi; };
|
| 1018 |
+
#endif
|
| 1019 |
+
#if defined( __CL_INT2__)
|
| 1020 |
+
__cl_int2 v2[2];
|
| 1021 |
+
#endif
|
| 1022 |
+
#if defined( __CL_INT4__)
|
| 1023 |
+
__cl_int4 v4;
|
| 1024 |
+
#endif
|
| 1025 |
+
}cl_int4;
|
| 1026 |
+
|
| 1027 |
+
/* cl_int3 is identical in size, alignment and behavior to cl_int4. See section 6.1.5. */
|
| 1028 |
+
typedef cl_int4 cl_int3;
|
| 1029 |
+
|
| 1030 |
+
typedef union
|
| 1031 |
+
{
|
| 1032 |
+
cl_int CL_ALIGNED(32) s[8];
|
| 1033 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1034 |
+
__CL_ANON_STRUCT__ struct{ cl_int x, y, z, w; };
|
| 1035 |
+
__CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7; };
|
| 1036 |
+
__CL_ANON_STRUCT__ struct{ cl_int4 lo, hi; };
|
| 1037 |
+
#endif
|
| 1038 |
+
#if defined( __CL_INT2__)
|
| 1039 |
+
__cl_int2 v2[4];
|
| 1040 |
+
#endif
|
| 1041 |
+
#if defined( __CL_INT4__)
|
| 1042 |
+
__cl_int4 v4[2];
|
| 1043 |
+
#endif
|
| 1044 |
+
#if defined( __CL_INT8__ )
|
| 1045 |
+
__cl_int8 v8;
|
| 1046 |
+
#endif
|
| 1047 |
+
}cl_int8;
|
| 1048 |
+
|
| 1049 |
+
typedef union
|
| 1050 |
+
{
|
| 1051 |
+
cl_int CL_ALIGNED(64) s[16];
|
| 1052 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1053 |
+
__CL_ANON_STRUCT__ struct{ cl_int x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
|
| 1054 |
+
__CL_ANON_STRUCT__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
|
| 1055 |
+
__CL_ANON_STRUCT__ struct{ cl_int8 lo, hi; };
|
| 1056 |
+
#endif
|
| 1057 |
+
#if defined( __CL_INT2__)
|
| 1058 |
+
__cl_int2 v2[8];
|
| 1059 |
+
#endif
|
| 1060 |
+
#if defined( __CL_INT4__)
|
| 1061 |
+
__cl_int4 v4[4];
|
| 1062 |
+
#endif
|
| 1063 |
+
#if defined( __CL_INT8__ )
|
| 1064 |
+
__cl_int8 v8[2];
|
| 1065 |
+
#endif
|
| 1066 |
+
#if defined( __CL_INT16__ )
|
| 1067 |
+
__cl_int16 v16;
|
| 1068 |
+
#endif
|
| 1069 |
+
}cl_int16;
|
| 1070 |
+
|
| 1071 |
+
|
| 1072 |
+
/* ---- cl_uintn ---- */
|
| 1073 |
+
typedef union
|
| 1074 |
+
{
|
| 1075 |
+
cl_uint CL_ALIGNED(8) s[2];
|
| 1076 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1077 |
+
__CL_ANON_STRUCT__ struct{ cl_uint x, y; };
|
| 1078 |
+
__CL_ANON_STRUCT__ struct{ cl_uint s0, s1; };
|
| 1079 |
+
__CL_ANON_STRUCT__ struct{ cl_uint lo, hi; };
|
| 1080 |
+
#endif
|
| 1081 |
+
#if defined( __CL_UINT2__)
|
| 1082 |
+
__cl_uint2 v2;
|
| 1083 |
+
#endif
|
| 1084 |
+
}cl_uint2;
|
| 1085 |
+
|
| 1086 |
+
typedef union
|
| 1087 |
+
{
|
| 1088 |
+
cl_uint CL_ALIGNED(16) s[4];
|
| 1089 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1090 |
+
__CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w; };
|
| 1091 |
+
__CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3; };
|
| 1092 |
+
__CL_ANON_STRUCT__ struct{ cl_uint2 lo, hi; };
|
| 1093 |
+
#endif
|
| 1094 |
+
#if defined( __CL_UINT2__)
|
| 1095 |
+
__cl_uint2 v2[2];
|
| 1096 |
+
#endif
|
| 1097 |
+
#if defined( __CL_UINT4__)
|
| 1098 |
+
__cl_uint4 v4;
|
| 1099 |
+
#endif
|
| 1100 |
+
}cl_uint4;
|
| 1101 |
+
|
| 1102 |
+
/* cl_uint3 is identical in size, alignment and behavior to cl_uint4. See section 6.1.5. */
|
| 1103 |
+
typedef cl_uint4 cl_uint3;
|
| 1104 |
+
|
| 1105 |
+
typedef union
|
| 1106 |
+
{
|
| 1107 |
+
cl_uint CL_ALIGNED(32) s[8];
|
| 1108 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1109 |
+
__CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w; };
|
| 1110 |
+
__CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7; };
|
| 1111 |
+
__CL_ANON_STRUCT__ struct{ cl_uint4 lo, hi; };
|
| 1112 |
+
#endif
|
| 1113 |
+
#if defined( __CL_UINT2__)
|
| 1114 |
+
__cl_uint2 v2[4];
|
| 1115 |
+
#endif
|
| 1116 |
+
#if defined( __CL_UINT4__)
|
| 1117 |
+
__cl_uint4 v4[2];
|
| 1118 |
+
#endif
|
| 1119 |
+
#if defined( __CL_UINT8__ )
|
| 1120 |
+
__cl_uint8 v8;
|
| 1121 |
+
#endif
|
| 1122 |
+
}cl_uint8;
|
| 1123 |
+
|
| 1124 |
+
typedef union
|
| 1125 |
+
{
|
| 1126 |
+
cl_uint CL_ALIGNED(64) s[16];
|
| 1127 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1128 |
+
__CL_ANON_STRUCT__ struct{ cl_uint x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
|
| 1129 |
+
__CL_ANON_STRUCT__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
|
| 1130 |
+
__CL_ANON_STRUCT__ struct{ cl_uint8 lo, hi; };
|
| 1131 |
+
#endif
|
| 1132 |
+
#if defined( __CL_UINT2__)
|
| 1133 |
+
__cl_uint2 v2[8];
|
| 1134 |
+
#endif
|
| 1135 |
+
#if defined( __CL_UINT4__)
|
| 1136 |
+
__cl_uint4 v4[4];
|
| 1137 |
+
#endif
|
| 1138 |
+
#if defined( __CL_UINT8__ )
|
| 1139 |
+
__cl_uint8 v8[2];
|
| 1140 |
+
#endif
|
| 1141 |
+
#if defined( __CL_UINT16__ )
|
| 1142 |
+
__cl_uint16 v16;
|
| 1143 |
+
#endif
|
| 1144 |
+
}cl_uint16;
|
| 1145 |
+
|
| 1146 |
+
/* ---- cl_longn ---- */
|
| 1147 |
+
typedef union
|
| 1148 |
+
{
|
| 1149 |
+
cl_long CL_ALIGNED(16) s[2];
|
| 1150 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1151 |
+
__CL_ANON_STRUCT__ struct{ cl_long x, y; };
|
| 1152 |
+
__CL_ANON_STRUCT__ struct{ cl_long s0, s1; };
|
| 1153 |
+
__CL_ANON_STRUCT__ struct{ cl_long lo, hi; };
|
| 1154 |
+
#endif
|
| 1155 |
+
#if defined( __CL_LONG2__)
|
| 1156 |
+
__cl_long2 v2;
|
| 1157 |
+
#endif
|
| 1158 |
+
}cl_long2;
|
| 1159 |
+
|
| 1160 |
+
typedef union
|
| 1161 |
+
{
|
| 1162 |
+
cl_long CL_ALIGNED(32) s[4];
|
| 1163 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1164 |
+
__CL_ANON_STRUCT__ struct{ cl_long x, y, z, w; };
|
| 1165 |
+
__CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3; };
|
| 1166 |
+
__CL_ANON_STRUCT__ struct{ cl_long2 lo, hi; };
|
| 1167 |
+
#endif
|
| 1168 |
+
#if defined( __CL_LONG2__)
|
| 1169 |
+
__cl_long2 v2[2];
|
| 1170 |
+
#endif
|
| 1171 |
+
#if defined( __CL_LONG4__)
|
| 1172 |
+
__cl_long4 v4;
|
| 1173 |
+
#endif
|
| 1174 |
+
}cl_long4;
|
| 1175 |
+
|
| 1176 |
+
/* cl_long3 is identical in size, alignment and behavior to cl_long4. See section 6.1.5. */
|
| 1177 |
+
typedef cl_long4 cl_long3;
|
| 1178 |
+
|
| 1179 |
+
typedef union
|
| 1180 |
+
{
|
| 1181 |
+
cl_long CL_ALIGNED(64) s[8];
|
| 1182 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1183 |
+
__CL_ANON_STRUCT__ struct{ cl_long x, y, z, w; };
|
| 1184 |
+
__CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7; };
|
| 1185 |
+
__CL_ANON_STRUCT__ struct{ cl_long4 lo, hi; };
|
| 1186 |
+
#endif
|
| 1187 |
+
#if defined( __CL_LONG2__)
|
| 1188 |
+
__cl_long2 v2[4];
|
| 1189 |
+
#endif
|
| 1190 |
+
#if defined( __CL_LONG4__)
|
| 1191 |
+
__cl_long4 v4[2];
|
| 1192 |
+
#endif
|
| 1193 |
+
#if defined( __CL_LONG8__ )
|
| 1194 |
+
__cl_long8 v8;
|
| 1195 |
+
#endif
|
| 1196 |
+
}cl_long8;
|
| 1197 |
+
|
| 1198 |
+
typedef union
|
| 1199 |
+
{
|
| 1200 |
+
cl_long CL_ALIGNED(128) s[16];
|
| 1201 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1202 |
+
__CL_ANON_STRUCT__ struct{ cl_long x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
|
| 1203 |
+
__CL_ANON_STRUCT__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
|
| 1204 |
+
__CL_ANON_STRUCT__ struct{ cl_long8 lo, hi; };
|
| 1205 |
+
#endif
|
| 1206 |
+
#if defined( __CL_LONG2__)
|
| 1207 |
+
__cl_long2 v2[8];
|
| 1208 |
+
#endif
|
| 1209 |
+
#if defined( __CL_LONG4__)
|
| 1210 |
+
__cl_long4 v4[4];
|
| 1211 |
+
#endif
|
| 1212 |
+
#if defined( __CL_LONG8__ )
|
| 1213 |
+
__cl_long8 v8[2];
|
| 1214 |
+
#endif
|
| 1215 |
+
#if defined( __CL_LONG16__ )
|
| 1216 |
+
__cl_long16 v16;
|
| 1217 |
+
#endif
|
| 1218 |
+
}cl_long16;
|
| 1219 |
+
|
| 1220 |
+
|
| 1221 |
+
/* ---- cl_ulongn ---- */
|
| 1222 |
+
typedef union
|
| 1223 |
+
{
|
| 1224 |
+
cl_ulong CL_ALIGNED(16) s[2];
|
| 1225 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1226 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong x, y; };
|
| 1227 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong s0, s1; };
|
| 1228 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong lo, hi; };
|
| 1229 |
+
#endif
|
| 1230 |
+
#if defined( __CL_ULONG2__)
|
| 1231 |
+
__cl_ulong2 v2;
|
| 1232 |
+
#endif
|
| 1233 |
+
}cl_ulong2;
|
| 1234 |
+
|
| 1235 |
+
typedef union
|
| 1236 |
+
{
|
| 1237 |
+
cl_ulong CL_ALIGNED(32) s[4];
|
| 1238 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1239 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w; };
|
| 1240 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3; };
|
| 1241 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong2 lo, hi; };
|
| 1242 |
+
#endif
|
| 1243 |
+
#if defined( __CL_ULONG2__)
|
| 1244 |
+
__cl_ulong2 v2[2];
|
| 1245 |
+
#endif
|
| 1246 |
+
#if defined( __CL_ULONG4__)
|
| 1247 |
+
__cl_ulong4 v4;
|
| 1248 |
+
#endif
|
| 1249 |
+
}cl_ulong4;
|
| 1250 |
+
|
| 1251 |
+
/* cl_ulong3 is identical in size, alignment and behavior to cl_ulong4. See section 6.1.5. */
|
| 1252 |
+
typedef cl_ulong4 cl_ulong3;
|
| 1253 |
+
|
| 1254 |
+
typedef union
|
| 1255 |
+
{
|
| 1256 |
+
cl_ulong CL_ALIGNED(64) s[8];
|
| 1257 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1258 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w; };
|
| 1259 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7; };
|
| 1260 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong4 lo, hi; };
|
| 1261 |
+
#endif
|
| 1262 |
+
#if defined( __CL_ULONG2__)
|
| 1263 |
+
__cl_ulong2 v2[4];
|
| 1264 |
+
#endif
|
| 1265 |
+
#if defined( __CL_ULONG4__)
|
| 1266 |
+
__cl_ulong4 v4[2];
|
| 1267 |
+
#endif
|
| 1268 |
+
#if defined( __CL_ULONG8__ )
|
| 1269 |
+
__cl_ulong8 v8;
|
| 1270 |
+
#endif
|
| 1271 |
+
}cl_ulong8;
|
| 1272 |
+
|
| 1273 |
+
typedef union
|
| 1274 |
+
{
|
| 1275 |
+
cl_ulong CL_ALIGNED(128) s[16];
|
| 1276 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1277 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
|
| 1278 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
|
| 1279 |
+
__CL_ANON_STRUCT__ struct{ cl_ulong8 lo, hi; };
|
| 1280 |
+
#endif
|
| 1281 |
+
#if defined( __CL_ULONG2__)
|
| 1282 |
+
__cl_ulong2 v2[8];
|
| 1283 |
+
#endif
|
| 1284 |
+
#if defined( __CL_ULONG4__)
|
| 1285 |
+
__cl_ulong4 v4[4];
|
| 1286 |
+
#endif
|
| 1287 |
+
#if defined( __CL_ULONG8__ )
|
| 1288 |
+
__cl_ulong8 v8[2];
|
| 1289 |
+
#endif
|
| 1290 |
+
#if defined( __CL_ULONG16__ )
|
| 1291 |
+
__cl_ulong16 v16;
|
| 1292 |
+
#endif
|
| 1293 |
+
}cl_ulong16;
|
| 1294 |
+
|
| 1295 |
+
|
| 1296 |
+
/* --- cl_floatn ---- */
|
| 1297 |
+
|
| 1298 |
+
typedef union
|
| 1299 |
+
{
|
| 1300 |
+
cl_float CL_ALIGNED(8) s[2];
|
| 1301 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1302 |
+
__CL_ANON_STRUCT__ struct{ cl_float x, y; };
|
| 1303 |
+
__CL_ANON_STRUCT__ struct{ cl_float s0, s1; };
|
| 1304 |
+
__CL_ANON_STRUCT__ struct{ cl_float lo, hi; };
|
| 1305 |
+
#endif
|
| 1306 |
+
#if defined( __CL_FLOAT2__)
|
| 1307 |
+
__cl_float2 v2;
|
| 1308 |
+
#endif
|
| 1309 |
+
}cl_float2;
|
| 1310 |
+
|
| 1311 |
+
typedef union
|
| 1312 |
+
{
|
| 1313 |
+
cl_float CL_ALIGNED(16) s[4];
|
| 1314 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1315 |
+
__CL_ANON_STRUCT__ struct{ cl_float x, y, z, w; };
|
| 1316 |
+
__CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3; };
|
| 1317 |
+
__CL_ANON_STRUCT__ struct{ cl_float2 lo, hi; };
|
| 1318 |
+
#endif
|
| 1319 |
+
#if defined( __CL_FLOAT2__)
|
| 1320 |
+
__cl_float2 v2[2];
|
| 1321 |
+
#endif
|
| 1322 |
+
#if defined( __CL_FLOAT4__)
|
| 1323 |
+
__cl_float4 v4;
|
| 1324 |
+
#endif
|
| 1325 |
+
}cl_float4;
|
| 1326 |
+
|
| 1327 |
+
/* cl_float3 is identical in size, alignment and behavior to cl_float4. See section 6.1.5. */
|
| 1328 |
+
typedef cl_float4 cl_float3;
|
| 1329 |
+
|
| 1330 |
+
typedef union
|
| 1331 |
+
{
|
| 1332 |
+
cl_float CL_ALIGNED(32) s[8];
|
| 1333 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1334 |
+
__CL_ANON_STRUCT__ struct{ cl_float x, y, z, w; };
|
| 1335 |
+
__CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7; };
|
| 1336 |
+
__CL_ANON_STRUCT__ struct{ cl_float4 lo, hi; };
|
| 1337 |
+
#endif
|
| 1338 |
+
#if defined( __CL_FLOAT2__)
|
| 1339 |
+
__cl_float2 v2[4];
|
| 1340 |
+
#endif
|
| 1341 |
+
#if defined( __CL_FLOAT4__)
|
| 1342 |
+
__cl_float4 v4[2];
|
| 1343 |
+
#endif
|
| 1344 |
+
#if defined( __CL_FLOAT8__ )
|
| 1345 |
+
__cl_float8 v8;
|
| 1346 |
+
#endif
|
| 1347 |
+
}cl_float8;
|
| 1348 |
+
|
| 1349 |
+
typedef union
|
| 1350 |
+
{
|
| 1351 |
+
cl_float CL_ALIGNED(64) s[16];
|
| 1352 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1353 |
+
__CL_ANON_STRUCT__ struct{ cl_float x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
|
| 1354 |
+
__CL_ANON_STRUCT__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
|
| 1355 |
+
__CL_ANON_STRUCT__ struct{ cl_float8 lo, hi; };
|
| 1356 |
+
#endif
|
| 1357 |
+
#if defined( __CL_FLOAT2__)
|
| 1358 |
+
__cl_float2 v2[8];
|
| 1359 |
+
#endif
|
| 1360 |
+
#if defined( __CL_FLOAT4__)
|
| 1361 |
+
__cl_float4 v4[4];
|
| 1362 |
+
#endif
|
| 1363 |
+
#if defined( __CL_FLOAT8__ )
|
| 1364 |
+
__cl_float8 v8[2];
|
| 1365 |
+
#endif
|
| 1366 |
+
#if defined( __CL_FLOAT16__ )
|
| 1367 |
+
__cl_float16 v16;
|
| 1368 |
+
#endif
|
| 1369 |
+
}cl_float16;
|
| 1370 |
+
|
| 1371 |
+
/* --- cl_doublen ---- */
|
| 1372 |
+
|
| 1373 |
+
typedef union
|
| 1374 |
+
{
|
| 1375 |
+
cl_double CL_ALIGNED(16) s[2];
|
| 1376 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1377 |
+
__CL_ANON_STRUCT__ struct{ cl_double x, y; };
|
| 1378 |
+
__CL_ANON_STRUCT__ struct{ cl_double s0, s1; };
|
| 1379 |
+
__CL_ANON_STRUCT__ struct{ cl_double lo, hi; };
|
| 1380 |
+
#endif
|
| 1381 |
+
#if defined( __CL_DOUBLE2__)
|
| 1382 |
+
__cl_double2 v2;
|
| 1383 |
+
#endif
|
| 1384 |
+
}cl_double2;
|
| 1385 |
+
|
| 1386 |
+
typedef union
|
| 1387 |
+
{
|
| 1388 |
+
cl_double CL_ALIGNED(32) s[4];
|
| 1389 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1390 |
+
__CL_ANON_STRUCT__ struct{ cl_double x, y, z, w; };
|
| 1391 |
+
__CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3; };
|
| 1392 |
+
__CL_ANON_STRUCT__ struct{ cl_double2 lo, hi; };
|
| 1393 |
+
#endif
|
| 1394 |
+
#if defined( __CL_DOUBLE2__)
|
| 1395 |
+
__cl_double2 v2[2];
|
| 1396 |
+
#endif
|
| 1397 |
+
#if defined( __CL_DOUBLE4__)
|
| 1398 |
+
__cl_double4 v4;
|
| 1399 |
+
#endif
|
| 1400 |
+
}cl_double4;
|
| 1401 |
+
|
| 1402 |
+
/* cl_double3 is identical in size, alignment and behavior to cl_double4. See section 6.1.5. */
|
| 1403 |
+
typedef cl_double4 cl_double3;
|
| 1404 |
+
|
| 1405 |
+
typedef union
|
| 1406 |
+
{
|
| 1407 |
+
cl_double CL_ALIGNED(64) s[8];
|
| 1408 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1409 |
+
__CL_ANON_STRUCT__ struct{ cl_double x, y, z, w; };
|
| 1410 |
+
__CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7; };
|
| 1411 |
+
__CL_ANON_STRUCT__ struct{ cl_double4 lo, hi; };
|
| 1412 |
+
#endif
|
| 1413 |
+
#if defined( __CL_DOUBLE2__)
|
| 1414 |
+
__cl_double2 v2[4];
|
| 1415 |
+
#endif
|
| 1416 |
+
#if defined( __CL_DOUBLE4__)
|
| 1417 |
+
__cl_double4 v4[2];
|
| 1418 |
+
#endif
|
| 1419 |
+
#if defined( __CL_DOUBLE8__ )
|
| 1420 |
+
__cl_double8 v8;
|
| 1421 |
+
#endif
|
| 1422 |
+
}cl_double8;
|
| 1423 |
+
|
| 1424 |
+
typedef union
|
| 1425 |
+
{
|
| 1426 |
+
cl_double CL_ALIGNED(128) s[16];
|
| 1427 |
+
#if __CL_HAS_ANON_STRUCT__
|
| 1428 |
+
__CL_ANON_STRUCT__ struct{ cl_double x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
|
| 1429 |
+
__CL_ANON_STRUCT__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
|
| 1430 |
+
__CL_ANON_STRUCT__ struct{ cl_double8 lo, hi; };
|
| 1431 |
+
#endif
|
| 1432 |
+
#if defined( __CL_DOUBLE2__)
|
| 1433 |
+
__cl_double2 v2[8];
|
| 1434 |
+
#endif
|
| 1435 |
+
#if defined( __CL_DOUBLE4__)
|
| 1436 |
+
__cl_double4 v4[4];
|
| 1437 |
+
#endif
|
| 1438 |
+
#if defined( __CL_DOUBLE8__ )
|
| 1439 |
+
__cl_double8 v8[2];
|
| 1440 |
+
#endif
|
| 1441 |
+
#if defined( __CL_DOUBLE16__ )
|
| 1442 |
+
__cl_double16 v16;
|
| 1443 |
+
#endif
|
| 1444 |
+
}cl_double16;
|
| 1445 |
+
|
| 1446 |
+
/* Macro to facilitate debugging
|
| 1447 |
+
* Usage:
|
| 1448 |
+
* Place CL_PROGRAM_STRING_DEBUG_INFO on the line before the first line of your source.
|
| 1449 |
+
* The first line ends with: CL_PROGRAM_STRING_DEBUG_INFO \"
|
| 1450 |
+
* Each line thereafter of OpenCL C source must end with: \n\
|
| 1451 |
+
* The last line ends in ";
|
| 1452 |
+
*
|
| 1453 |
+
* Example:
|
| 1454 |
+
*
|
| 1455 |
+
* const char *my_program = CL_PROGRAM_STRING_DEBUG_INFO "\
|
| 1456 |
+
* kernel void foo( int a, float * b ) \n\
|
| 1457 |
+
* { \n\
|
| 1458 |
+
* // my comment \n\
|
| 1459 |
+
* *b[ get_global_id(0)] = a; \n\
|
| 1460 |
+
* } \n\
|
| 1461 |
+
* ";
|
| 1462 |
+
*
|
| 1463 |
+
* This should correctly set up the line, (column) and file information for your source
|
| 1464 |
+
* string so you can do source level debugging.
|
| 1465 |
+
*/
|
| 1466 |
+
#define __CL_STRINGIFY( _x ) # _x
|
| 1467 |
+
#define _CL_STRINGIFY( _x ) __CL_STRINGIFY( _x )
|
| 1468 |
+
#define CL_PROGRAM_STRING_DEBUG_INFO "#line " _CL_STRINGIFY(__LINE__) " \"" __FILE__ "\" \n\n"
|
| 1469 |
+
|
| 1470 |
+
#ifdef __cplusplus
|
| 1471 |
+
}
|
| 1472 |
+
#endif
|
| 1473 |
+
|
| 1474 |
+
#if defined(_WIN32) && defined(_MSC_VER) && __CL_HAS_ANON_STRUCT__
|
| 1475 |
+
#pragma warning( pop )
|
| 1476 |
+
#endif
|
| 1477 |
+
|
| 1478 |
+
#endif /* __CL_PLATFORM_H */
|
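Aside on the unions above: each cl_<type>n vector gives host code up to three aliased views of the same bytes: the always-present s[] array, the named fields (.x/.y/.z/.w, .s0..sF, .lo/.hi, available only when __CL_HAS_ANON_STRUCT__ is 1), and the native SIMD member (v2/v4/v8/v16, present only when the matching __CL_*__ macro was set by the SSE2/MMX/AVX blocks). A minimal host-side sketch, not part of the diffed header, assuming a C11 or GCC/Clang toolchain where the anonymous-struct fields exist:

/* Hypothetical demo file, compiled on the host; only cl_platform.h is required. */
#include <CL/cl_platform.h>
#include <stdio.h>

int main(void)
{
    cl_float4 v;
    v.s[0] = 1.0f; v.s[1] = 2.0f; v.s[2] = 3.0f; v.s[3] = 4.0f;
#if __CL_HAS_ANON_STRUCT__
    /* .x and .s0 alias s[0]; .hi is a cl_float2 covering s[2..3] */
    printf("x=%f  s3=%f  hi.s[0]=%f\n", v.x, v.s3, v.hi.s[0]);
#endif
    return 0;
}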
miniCUDA124/include/CL/cl_version.h
ADDED
@@ -0,0 +1,81 @@
+/*******************************************************************************
+ * Copyright (c) 2018-2020 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
+
+#ifndef __CL_VERSION_H
+#define __CL_VERSION_H
+
+/* Detect which version to target */
+#if !defined(CL_TARGET_OPENCL_VERSION)
+#pragma message("cl_version.h: CL_TARGET_OPENCL_VERSION is not defined. Defaulting to 300 (OpenCL 3.0)")
+#define CL_TARGET_OPENCL_VERSION 300
+#endif
+#if CL_TARGET_OPENCL_VERSION != 100 && \
+    CL_TARGET_OPENCL_VERSION != 110 && \
+    CL_TARGET_OPENCL_VERSION != 120 && \
+    CL_TARGET_OPENCL_VERSION != 200 && \
+    CL_TARGET_OPENCL_VERSION != 210 && \
+    CL_TARGET_OPENCL_VERSION != 220 && \
+    CL_TARGET_OPENCL_VERSION != 300
+#pragma message("cl_version: CL_TARGET_OPENCL_VERSION is not a valid value (100, 110, 120, 200, 210, 220, 300). Defaulting to 300 (OpenCL 3.0)")
+#undef CL_TARGET_OPENCL_VERSION
+#define CL_TARGET_OPENCL_VERSION 300
+#endif
+
+
+/* OpenCL Version */
+#if CL_TARGET_OPENCL_VERSION >= 300 && !defined(CL_VERSION_3_0)
+#define CL_VERSION_3_0 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 220 && !defined(CL_VERSION_2_2)
+#define CL_VERSION_2_2 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 210 && !defined(CL_VERSION_2_1)
+#define CL_VERSION_2_1 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 200 && !defined(CL_VERSION_2_0)
+#define CL_VERSION_2_0 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 120 && !defined(CL_VERSION_1_2)
+#define CL_VERSION_1_2 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 110 && !defined(CL_VERSION_1_1)
+#define CL_VERSION_1_1 1
+#endif
+#if CL_TARGET_OPENCL_VERSION >= 100 && !defined(CL_VERSION_1_0)
+#define CL_VERSION_1_0 1
+#endif
+
+/* Allow deprecated APIs for older OpenCL versions. */
+#if CL_TARGET_OPENCL_VERSION <= 220 && !defined(CL_USE_DEPRECATED_OPENCL_2_2_APIS)
+#define CL_USE_DEPRECATED_OPENCL_2_2_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 210 && !defined(CL_USE_DEPRECATED_OPENCL_2_1_APIS)
+#define CL_USE_DEPRECATED_OPENCL_2_1_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 200 && !defined(CL_USE_DEPRECATED_OPENCL_2_0_APIS)
+#define CL_USE_DEPRECATED_OPENCL_2_0_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 120 && !defined(CL_USE_DEPRECATED_OPENCL_1_2_APIS)
+#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 110 && !defined(CL_USE_DEPRECATED_OPENCL_1_1_APIS)
+#define CL_USE_DEPRECATED_OPENCL_1_1_APIS
+#endif
+#if CL_TARGET_OPENCL_VERSION <= 100 && !defined(CL_USE_DEPRECATED_OPENCL_1_0_APIS)
+#define CL_USE_DEPRECATED_OPENCL_1_0_APIS
+#endif
+
+#endif /* __CL_VERSION_H */
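Per the detection logic above, a client normally pins the API surface it was written against before including any OpenCL header; otherwise the #pragma message fires and the headers default to OpenCL 3.0. A minimal sketch (not part of the diff):

/* Target the OpenCL 1.2 API: cl_version.h then defines CL_VERSION_1_2,
 * CL_VERSION_1_1, CL_VERSION_1_0 and the CL_USE_DEPRECATED_OPENCL_1_*_APIS
 * guards, so deprecated 1.x entry points stay visible. */
#define CL_TARGET_OPENCL_VERSION 120
#include <CL/opencl.h>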
miniCUDA124/include/CL/opencl.h
ADDED
@@ -0,0 +1,32 @@
+/*******************************************************************************
+ * Copyright (c) 2008-2021 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
+
+#ifndef __OPENCL_H
+#define __OPENCL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <CL/cl.h>
+#include <CL/cl_gl.h>
+#include <CL/cl_ext.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __OPENCL_H */
miniCUDA124/include/cooperative_groups/memcpy_async.h
ADDED
@@ -0,0 +1,62 @@
+/* Copyright 1993-2016 NVIDIA Corporation.  All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * The source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * The Licensed Deliverables contained herein are PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee.  Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  THEY ARE
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users.  These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
+ * only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#ifndef _COOPERATIVE_GROUPS_MEMCPY_ASYNC
+#define _COOPERATIVE_GROUPS_MEMCPY_ASYNC
+
+#include "../cooperative_groups.h"
+#include "details/info.h"
+
+#ifdef _CG_CPP11_FEATURES
+# include "details/async.h"
+#else
+# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
+        -std=c++11 compiler option.
+#endif
+
+#endif // _COOPERATIVE_GROUPS_MEMCPY_ASYNC
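The header above only pulls in details/async.h behind a C++11 guard; the functionality it exposes is the cooperative_groups::memcpy_async/wait pair for group-wide asynchronous copies into shared memory. A hedged sketch of typical usage (kernel and buffer names are illustrative, not from this repository):

// Launch with dynamic shared memory of blockDim.x * sizeof(float);
// assumes n is a multiple of blockDim.x so the tile copy stays in bounds.
#include <cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
namespace cg = cooperative_groups;

__global__ void stage_and_scale(const float* in, float* out, int n)
{
    extern __shared__ float tile[];
    cg::thread_block block = cg::this_thread_block();

    // All threads of the block cooperatively start one async tile copy.
    cg::memcpy_async(block, tile, in + blockIdx.x * blockDim.x,
                     sizeof(float) * blockDim.x);
    cg::wait(block);  // block until the group's outstanding copies complete

    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 2.0f * tile[threadIdx.x];
}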
miniCUDA124/include/cooperative_groups/reduce.h
ADDED
@@ -0,0 +1,63 @@
+/* Copyright 1993-2016 NVIDIA Corporation.  All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * The source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * The Licensed Deliverables contained herein are PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee.  Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  THEY ARE
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users.  These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
+ * only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#ifndef _COOPERATIVE_GROUPS_REDUCE_H
+#define _COOPERATIVE_GROUPS_REDUCE_H
+
+#include "../cooperative_groups.h"
+#include "details/info.h"
+
+#ifdef _CG_CPP11_FEATURES
+# include "details/reduce.h"
+#else
+# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
+        -std=c++11 compiler option.
+#endif
+
+
+#endif //_COOPERATIVE_GROUPS_REDUCE_H
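details/reduce.h, included above under the same C++11 guard, provides cg::reduce plus the functor types such as cg::plus. A hedged sketch of a warp-tile sum (kernel and buffer names are illustrative only; assumes a launch of 32 threads per block):

#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>
namespace cg = cooperative_groups;

__global__ void warp_sum(const int* in, int* out)
{
    cg::thread_block_tile<32> tile =
        cg::tiled_partition<32>(cg::this_thread_block());

    int v = in[blockIdx.x * 32 + tile.thread_rank()];
    // Every lane of the tile receives the full reduction result.
    int sum = cg::reduce(tile, v, cg::plus<int>());
    if (tile.thread_rank() == 0) out[blockIdx.x] = sum;
}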
miniCUDA124/include/cooperative_groups/scan.h
ADDED
@@ -0,0 +1,63 @@
+/* Copyright 1993-2016 NVIDIA Corporation.  All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * The source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * The Licensed Deliverables contained herein are PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee.  Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  THEY ARE
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users.  These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
+ * only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#ifndef _COOPERATIVE_GROUPS_SCAN_H
+#define _COOPERATIVE_GROUPS_SCAN_H
+
+#include "../cooperative_groups.h"
+#include "details/info.h"
+
+#ifdef _CG_CPP11_FEATURES
+# include "details/scan.h"
+#else
+# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
+        -std=c++11 compiler option.
+#endif
+
+
+#endif //_COOPERATIVE_GROUPS_SCAN_H
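details/scan.h, pulled in above, provides cg::inclusive_scan and cg::exclusive_scan over tiles; with no operator argument the scan sums. A hedged sketch of a warp-level prefix sum (names illustrative; assumes 32 threads per block):

#include <cooperative_groups.h>
#include <cooperative_groups/scan.h>
namespace cg = cooperative_groups;

__global__ void warp_prefix(const int* in, int* out)
{
    cg::thread_block_tile<32> tile =
        cg::tiled_partition<32>(cg::this_thread_block());

    int idx = blockIdx.x * 32 + tile.thread_rank();
    // Lane i receives in[base] + ... + in[base + i] within its tile.
    out[idx] = cg::inclusive_scan(tile, in[idx]);
}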
miniCUDA124/include/crt/common_functions.h
ADDED
|
@@ -0,0 +1,310 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+/*
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/common_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/common_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__
+#endif
+
+#if !defined(__COMMON_FUNCTIONS_H__)
+#define __COMMON_FUNCTIONS_H__
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#if defined(__cplusplus) && defined(__CUDACC__)
+
+#include "builtin_types.h"
+#include "host_defines.h"
+
+#define __CUDACC_VER__ "__CUDACC_VER__ is no longer supported. Use __CUDACC_VER_MAJOR__, __CUDACC_VER_MINOR__, and __CUDACC_VER_BUILD__ instead."
+
+#ifndef __CUDA_API_VER_MAJOR__
+#define __CUDA_API_VER_MAJOR__ __CUDACC_VER_MAJOR__
+#endif /* __CUDA_API_VER_MAJOR__ */
+
+#ifndef __CUDA_API_VER_MINOR__
+#define __CUDA_API_VER_MINOR__ __CUDACC_VER_MINOR__
+#endif /* __CUDA_API_VER_MINOR__ */
+
+#if !defined(__CUDACC_RTC__)
+#include <string.h>
+#include <time.h>
+
+extern "C"
+{
+#endif /* !__CUDACC_RTC__ */
+extern _CRTIMP __host__ __device__ __device_builtin__ __cudart_builtin__ clock_t __cdecl clock(void)
+#if defined(__QNX__)
+asm("clock32")
+#endif
+__THROW;
+extern __host__ __device__ __device_builtin__ __cudart_builtin__ void* __cdecl memset(void*, int, size_t) __THROW;
+extern __host__ __device__ __device_builtin__ __cudart_builtin__ void* __cdecl memcpy(void*, const void*, size_t) __THROW;
+#if !defined(__CUDACC_RTC__)
+}
+#endif /* !__CUDACC_RTC__ */
+
+#if defined(__CUDA_ARCH__)
+
+#if defined(__CUDACC_RTC__)
+inline __host__ __device__ void* operator new(size_t, void *p) { return p; }
+inline __host__ __device__ void* operator new[](size_t, void *p) { return p; }
+inline __host__ __device__ void operator delete(void*, void*) { }
+inline __host__ __device__ void operator delete[](void*, void*) { }
+#else /* !__CUDACC_RTC__ */
+#ifndef __CUDA_INTERNAL_SKIP_CPP_HEADERS__
+#include <new>
+#endif
+
+#if defined (__GNUC__)
+
+#define STD \
+        std::
+
+#else /* __GNUC__ */
+
+#define STD
+
+#endif /* __GNUC__ */
+
+extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t, void*) throw();
+extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t, void*) throw();
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, void*) throw();
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, void*) throw();
+# if __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__)
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t) throw();
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t) throw();
+#endif /* __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__) */
+#endif /* __CUDACC_RTC__ */
+
+#if !defined(__CUDACC_RTC__)
+#include <stdio.h>
+#include <stdlib.h>
+#endif /* !__CUDACC_RTC__ */
+
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+namespace std {
+#endif
+extern "C"
+{
+extern
+#if !defined(_MSC_VER) || _MSC_VER < 1900
+_CRTIMP
+#endif
+
+#if defined(__GLIBC__) && defined(__GLIBC_MINOR__) && ( (__GLIBC__ < 2) || ( (__GLIBC__ == 2) && (__GLIBC_MINOR__ < 3) ) )
+__host__ __device__ __device_builtin__ __cudart_builtin__ int __cdecl printf(const char*, ...) __THROW;
+#else /* newer glibc */
+__host__ __device__ __device_builtin__ __cudart_builtin__ int __cdecl printf(const char*, ...);
+#endif /* defined(__GLIBC__) && defined(__GLIBC_MINOR__) && ( (__GLIBC__ < 2) || ( (__GLIBC__ == 2) && (__GLIBC_MINOR__ < 3) ) ) */
+
+
+extern _CRTIMP __host__ __device__ __cudart_builtin__ void* __cdecl malloc(size_t) __THROW;
+extern _CRTIMP __host__ __device__ __cudart_builtin__ void __cdecl free(void*) __THROW;
+
+#if defined(_MSC_VER)
+extern __host__ __device__ __cudart_builtin__ void* __cdecl _alloca(size_t);
+#endif
+
+#if defined(__QNX__)
+#undef alloca
+#define alloca(__S) __builtin_alloca(__S)
+#endif
+}
+#if defined(__QNX__) && !defined(_LIBCPP_VERSION)
+} /* std */
+#endif
+
+#if !defined(__CUDACC_RTC__)
+#include <assert.h>
+#endif /* !__CUDACC_RTC__ */
+
+extern "C"
+{
+#if defined(__CUDACC_RTC__)
+extern __host__ __device__ void __assertfail(const char * __assertion,
+                                             const char *__file,
+                                             unsigned int __line,
+                                             const char *__function,
+                                             size_t charsize);
+#elif defined(__APPLE__)
+#define __builtin_expect(exp,c) (exp)
+extern __host__ __device__ __cudart_builtin__ void __assert_rtn(
+  const char *, const char *, int, const char *);
+#elif defined(__ANDROID__)
+extern __host__ __device__ __cudart_builtin__ void __assert2(
+  const char *, int, const char *, const char *);
+#elif defined(__QNX__)
+#if !defined(_LIBCPP_VERSION)
+namespace std {
+#endif
+extern __host__ __device__ __cudart_builtin__ void __assert(
+  const char *, const char *, unsigned int, const char *);
+#if !defined(_LIBCPP_VERSION)
+}
+#endif
+#elif defined(__HORIZON__)
+extern __host__ __device__ __cudart_builtin__ void __assert_fail(
+  const char *, const char *, int, const char *);
+#elif defined(__GNUC__)
+extern __host__ __device__ __cudart_builtin__ void __assert_fail(
+  const char *, const char *, unsigned int, const char *)
+  __THROW;
+#elif defined(_WIN32)
+extern __host__ __device__ __cudart_builtin__ _CRTIMP void __cdecl _wassert(
+  const wchar_t *, const wchar_t *, unsigned);
+#endif
+}
+
+#if defined(__CUDACC_RTC__)
+#ifdef NDEBUG
+#define assert(e) (static_cast<void>(0))
+#else /* !NDEBUG */
+#define __ASSERT_STR_HELPER(x) #x
+#define assert(e) ((e) ? static_cast<void>(0)\
+                       : __assertfail(__ASSERT_STR_HELPER(e), __FILE__,\
+                                      __LINE__, __PRETTY_FUNCTION__,\
+                                      sizeof(char)))
+#endif /* NDEBUG */
+__host__ __device__ void* operator new(size_t);
+__host__ __device__ void* operator new[](size_t);
+__host__ __device__ void operator delete(void*);
+__host__ __device__ void operator delete[](void*);
+# if __cplusplus >= 201402L
+__host__ __device__ void operator delete(void*, size_t);
+__host__ __device__ void operator delete[](void*, size_t);
+#endif /* __cplusplus >= 201402L */
+
+#if __cplusplus >= 201703L
+namespace std { enum class align_val_t : size_t {}; }
+__host__ __device__ void* __cdecl operator new(size_t sz, std::align_val_t) noexcept;
+__host__ __device__ void* __cdecl operator new[](size_t sz, std::align_val_t) noexcept;
+__host__ __device__ void __cdecl operator delete(void* ptr, std::align_val_t) noexcept;
+__host__ __device__ void __cdecl operator delete[](void* ptr, std::align_val_t) noexcept;
+__host__ __device__ void __cdecl operator delete(void* ptr, size_t, std::align_val_t) noexcept;
+__host__ __device__ void __cdecl operator delete[](void* ptr, size_t, std::align_val_t) noexcept;
+#endif /* __cplusplus >= 201703L */
+
+#else /* !__CUDACC_RTC__ */
+#if defined (__GNUC__)
+
+#define __NV_GLIBCXX_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+
+#if (__cplusplus >= 201103L) && ((!(defined(__QNX__) && defined(_LIBCPP_VERSION))) || (defined(__QNX__) && __NV_GLIBCXX_VERSION >= 80300))
+#define THROWBADALLOC
+#else
+#if defined(__ANDROID__) && !defined(_LIBCPP_VERSION) && (defined(__BIONIC__) || __NV_GLIBCXX_VERSION < 40900)
+#define THROWBADALLOC
+#else
+#define THROWBADALLOC throw(STD bad_alloc)
+#endif
+#endif
+#define __DELETE_THROW throw()
+
+#undef __NV_GLIBCXX_VERSION
+
+#else /* __GNUC__ */
+
+#define THROWBADALLOC throw(...)
+
+#endif /* __GNUC__ */
+
+extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t) THROWBADALLOC;
+extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t) THROWBADALLOC;
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*) throw();
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*) throw();
+# if __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__)
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t) throw();
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t) throw();
+#endif /* __cplusplus >= 201402L || (defined(_MSC_VER) && _MSC_VER >= 1900) || defined(__CUDA_XLC_CPP14__) || defined(__CUDA_ICC_CPP14__) */
+
+#if __cpp_aligned_new
+extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new(STD size_t, std::align_val_t);
+extern __host__ __device__ __cudart_builtin__ void* __cdecl operator new[](STD size_t, std::align_val_t);
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, std::align_val_t) noexcept;
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, std::align_val_t) noexcept;
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete(void*, STD size_t, std::align_val_t) noexcept;
+extern __host__ __device__ __cudart_builtin__ void __cdecl operator delete[](void*, STD size_t, std::align_val_t) noexcept;
+#endif /* __cpp_aligned_new */
+
+#undef THROWBADALLOC
+#undef STD
+#endif /* __CUDACC_RTC__ */
+
+#endif /* __CUDA_ARCH__ */
+
+#endif /* __cplusplus && __CUDACC__ */
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#if defined(__CUDACC_RTC__) && (__CUDA_ARCH__ >= 350)
+#include "cuda_device_runtime_api.h"
+#endif
+
+#include "math_functions.h"
+
+#endif /* !__COMMON_FUNCTIONS_H__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H__
+#endif
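
(Editor's note, not part of the diff: the header above forward-declares host functions such as printf, malloc/free, assert, and placement new/delete for use in __device__ code. The standalone kernel below is a minimal illustrative sketch of what those declarations enable; the kernel name builtins_demo is hypothetical.)

#include <cstdio>
#include <cassert>
#include <new>

// Minimal sketch: exercises the device-side builtins declared by the header.
__global__ void builtins_demo(void)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // Device-heap allocation via the malloc/free declared for device code.
    int *slot = static_cast<int *>(malloc(sizeof(int)));
    assert(slot != NULL);                         // device-side assert

    new (slot) int(tid);                          // placement new, also declared above
    printf("thread %d wrote %d\n", tid, *slot);   // device-side printf

    free(slot);
}

int main(void)
{
    builtins_demo<<<1, 4>>>();
    cudaDeviceSynchronize();                      // flush device-side printf output
    return 0;
}
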
miniCUDA124/include/crt/cudacc_ext.h
ADDED
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2021-2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/cudacc_ext.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/cudacc_ext.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__
+#endif
+
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDACC_EXT_H__
+#endif
miniCUDA124/include/crt/device_double_functions.h
ADDED
@@ -0,0 +1,1192 @@
+/*
+ * Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/device_double_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/device_double_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__
+#endif
+
+#if !defined(__DEVICE_DOUBLE_FUNCTIONS_H__)
+#define __DEVICE_DOUBLE_FUNCTIONS_H__
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#if defined(__cplusplus) && defined(__CUDACC__)
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#if defined(__CUDACC_RTC__)
+#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ __device__
+#else
+#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ static __inline__ __device__
+#endif /* __CUDACC_RTC__ */
+
+#include "builtin_types.h"
+#include "device_types.h"
+#include "host_defines.h"
+
+//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time)
+#define EXCLUDE_FROM_RTC
+
+extern "C"
+{
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Reinterpret bits in a double as a 64-bit signed integer.
+ *
+ * Reinterpret the bits in the double-precision floating-point value \p x
+ * as a signed 64-bit integer.
+ * \return Returns reinterpreted value.
+ */
+extern __device__ __device_builtin__ long long int __double_as_longlong(double x);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_CAST
+ * \brief Reinterpret bits in a 64-bit signed integer as a double.
+ *
+ * Reinterpret the bits in the 64-bit signed integer value \p x as
+ * a double-precision floating-point value.
+ * \return Returns reinterpreted value.
+ */
+extern __device__ __device_builtin__ double __longlong_as_double(long long int x);
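
(Editor's note, not part of the diff: the two bit-cast intrinsics above are the standard building block for 64-bit compare-and-swap loops on double. A minimal sketch of that pattern follows; the helper name atomic_add_double is hypothetical, and since compute capability 6.0 the runtime also offers a native atomicAdd(double*, double).)

// Sketch: software atomicAdd on double, built from atomicCAS plus the
// __double_as_longlong / __longlong_as_double bit casts declared above.
__device__ double atomic_add_double(double *address, double val)
{
    unsigned long long int *address_as_ull = (unsigned long long int *)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        // Reinterpret the bits as double, add, then reinterpret back for the CAS.
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);  // the loop compares bit patterns, so NaN cannot livelock it
    return __longlong_as_double(old);
}
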
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Compute $x \times y + z$ as a single operation in round-to-nearest-even mode.
+ *
+ * Computes the value of $x \times y + z$ as a single ternary operation,
+ * rounding the result once in round-to-nearest-even mode.
+ *
+ * \return Returns the rounded value of $x \times y + z$ as a single operation.
+ * - fmaf($\pm\infty$, $\pm 0$, \p z) returns NaN.
+ * - fmaf($\pm 0$, $\pm\infty$, \p z) returns NaN.
+ * - fmaf(\p x, \p y, $-\infty$) returns NaN if $x \times y$ is an exact $+\infty$.
+ * - fmaf(\p x, \p y, $+\infty$) returns NaN if $x \times y$ is an exact $-\infty$.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double __fma_rn(double x, double y, double z);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Compute $x \times y + z$ as a single operation in round-towards-zero mode.
+ *
+ * Computes the value of $x \times y + z$ as a single ternary operation,
+ * rounding the result once in round-towards-zero mode.
+ *
+ * \return Returns the rounded value of $x \times y + z$ as a single operation.
+ * - fmaf($\pm\infty$, $\pm 0$, \p z) returns NaN.
+ * - fmaf($\pm 0$, $\pm\infty$, \p z) returns NaN.
+ * - fmaf(\p x, \p y, $-\infty$) returns NaN if $x \times y$ is an exact $+\infty$.
+ * - fmaf(\p x, \p y, $+\infty$) returns NaN if $x \times y$ is an exact $-\infty$.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double __fma_rz(double x, double y, double z);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Compute $x \times y + z$ as a single operation in round-up mode.
+ *
+ * Computes the value of $x \times y + z$ as a single ternary operation,
+ * rounding the result once in round-up (to positive infinity) mode.
+ *
+ * \return Returns the rounded value of $x \times y + z$ as a single operation.
+ * - fmaf($\pm\infty$, $\pm 0$, \p z) returns NaN.
+ * - fmaf($\pm 0$, $\pm\infty$, \p z) returns NaN.
+ * - fmaf(\p x, \p y, $-\infty$) returns NaN if $x \times y$ is an exact $+\infty$.
+ * - fmaf(\p x, \p y, $+\infty$) returns NaN if $x \times y$ is an exact $-\infty$.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double __fma_ru(double x, double y, double z);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Compute $x \times y + z$ as a single operation in round-down mode.
+ *
+ * Computes the value of $x \times y + z$ as a single ternary operation,
+ * rounding the result once in round-down (to negative infinity) mode.
+ *
+ * \return Returns the rounded value of $x \times y + z$ as a single operation.
+ * - fmaf($\pm\infty$, $\pm 0$, \p z) returns NaN.
+ * - fmaf($\pm 0$, $\pm\infty$, \p z) returns NaN.
+ * - fmaf(\p x, \p y, $-\infty$) returns NaN if $x \times y$ is an exact $+\infty$.
+ * - fmaf(\p x, \p y, $+\infty$) returns NaN if $x \times y$ is an exact $-\infty$.
+ *
+ * \note_accuracy_double
+ */
+extern __device__ __device_builtin__ double __fma_rd(double x, double y, double z);
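
(Editor's note, not part of the diff: because each __fma_* variant rounds $x \times y + z$ exactly once, __fma_rn can recover the rounding error of a product, the classic TwoProd error-free transformation. A minimal sketch follows, assuming no overflow or underflow; the helper name two_prod is hypothetical, and __dmul_rn is declared further down in this header.)

// Sketch: error-free product. After the call, prod + err == x * y exactly
// (barring overflow/underflow), because __fma_rn rounds only once.
__device__ void two_prod(double x, double y, double *prod, double *err)
{
    *prod = __dmul_rn(x, y);         // correctly rounded product
    *err  = __fma_rn(x, y, -*prod);  // exact residual of that rounding
}
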
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Add two floating-point values in round-to-nearest-even mode.
+ *
+ * Adds two floating-point values \p x and \p y in round-to-nearest-even mode.
+ *
+ * \return Returns \p x + \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dadd_rn(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Add two floating-point values in round-towards-zero mode.
+ *
+ * Adds two floating-point values \p x and \p y in round-towards-zero mode.
+ *
+ * \return Returns \p x + \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dadd_rz(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Add two floating-point values in round-up mode.
+ *
+ * Adds two floating-point values \p x and \p y in round-up (to positive infinity) mode.
+ *
+ * \return Returns \p x + \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dadd_ru(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Add two floating-point values in round-down mode.
+ *
+ * Adds two floating-point values \p x and \p y in round-down (to negative infinity) mode.
+ *
+ * \return Returns \p x + \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dadd_rd(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Subtract two floating-point values in round-to-nearest-even mode.
+ *
+ * Subtracts two floating-point values \p x and \p y in round-to-nearest-even mode.
+ *
+ * \return Returns \p x - \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dsub_rn(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Subtract two floating-point values in round-towards-zero mode.
+ *
+ * Subtracts two floating-point values \p x and \p y in round-towards-zero mode.
+ *
+ * \return Returns \p x - \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dsub_rz(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Subtract two floating-point values in round-up mode.
+ *
+ * Subtracts two floating-point values \p x and \p y in round-up (to positive infinity) mode.
+ *
+ * \return Returns \p x - \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dsub_ru(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Subtract two floating-point values in round-down mode.
+ *
+ * Subtracts two floating-point values \p x and \p y in round-down (to negative infinity) mode.
+ *
+ * \return Returns \p x - \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dsub_rd(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Multiply two floating-point values in round-to-nearest-even mode.
+ *
+ * Multiplies two floating-point values \p x and \p y in round-to-nearest-even mode.
+ *
+ * \return Returns \p x * \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dmul_rn(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Multiply two floating-point values in round-towards-zero mode.
+ *
+ * Multiplies two floating-point values \p x and \p y in round-towards-zero mode.
+ *
+ * \return Returns \p x * \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dmul_rz(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Multiply two floating-point values in round-up mode.
+ *
+ * Multiplies two floating-point values \p x and \p y in round-up (to positive infinity) mode.
+ *
+ * \return Returns \p x * \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dmul_ru(double x, double y);
+/**
+ * \ingroup CUDA_MATH_INTRINSIC_DOUBLE
+ * \brief Multiply two floating-point values in round-down mode.
+ *
+ * Multiplies two floating-point values \p x and \p y in round-down (to negative infinity) mode.
+ *
+ * \return Returns \p x * \p y.
+ *
+ * \note_accuracy_double
+ * \note_nofma
+ */
+extern __device__ __device_builtin__ double __dmul_rd(double x, double y);
| 882 |
+
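/*
 * Illustrative sketch (not part of the original header): the directed-rounding
 * add intrinsics above are the basic building block of interval arithmetic.
 * Running one accumulator rounded toward -infinity and one toward +infinity
 * brackets the exact sum. The kernel and parameter names are hypothetical;
 * the code assumes compilation as device code with nvcc.
 */
__global__ void sum_bounds(const double *x, int n, double *lo, double *hi)
{
    double l = 0.0, h = 0.0;
    for (int i = 0; i < n; ++i) {
        l = __dadd_rd(l, x[i]);   /* partial sums rounded toward -infinity */
        h = __dadd_ru(h, x[i]);   /* partial sums rounded toward +infinity */
    }
    *lo = l;   /* the exact (infinite-precision) sum lies in [*lo, *hi] */
    *hi = h;
}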
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to a float in round-to-nearest-even mode.
 *
 * Convert the double-precision floating-point value \p x to a single-precision
 * floating-point value in round-to-nearest-even mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ float __double2float_rn(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to a float in round-towards-zero mode.
 *
 * Convert the double-precision floating-point value \p x to a single-precision
 * floating-point value in round-towards-zero mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ float __double2float_rz(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to a float in round-up mode.
 *
 * Convert the double-precision floating-point value \p x to a single-precision
 * floating-point value in round-up (to positive infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ float __double2float_ru(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to a float in round-down mode.
 *
 * Convert the double-precision floating-point value \p x to a single-precision
 * floating-point value in round-down (to negative infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ float __double2float_rd(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to a signed int in round-to-nearest-even mode.
 *
 * Convert the double-precision floating-point value \p x to a
 * signed integer value in round-to-nearest-even mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ int __double2int_rn(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to a signed int in round-up mode.
 *
 * Convert the double-precision floating-point value \p x to a
 * signed integer value in round-up (to positive infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ int __double2int_ru(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to a signed int in round-down mode.
 *
 * Convert the double-precision floating-point value \p x to a
 * signed integer value in round-down (to negative infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ int __double2int_rd(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to an unsigned int in round-to-nearest-even mode.
 *
 * Convert the double-precision floating-point value \p x to an
 * unsigned integer value in round-to-nearest-even mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ unsigned int __double2uint_rn(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to an unsigned int in round-up mode.
 *
 * Convert the double-precision floating-point value \p x to an
 * unsigned integer value in round-up (to positive infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ unsigned int __double2uint_ru(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to an unsigned int in round-down mode.
 *
 * Convert the double-precision floating-point value \p x to an
 * unsigned integer value in round-down (to negative infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ unsigned int __double2uint_rd(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to a signed 64-bit int in round-to-nearest-even mode.
 *
 * Convert the double-precision floating-point value \p x to a
 * signed 64-bit integer value in round-to-nearest-even mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ long long int __double2ll_rn(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to a signed 64-bit int in round-up mode.
 *
 * Convert the double-precision floating-point value \p x to a
 * signed 64-bit integer value in round-up (to positive infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ long long int __double2ll_ru(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to a signed 64-bit int in round-down mode.
 *
 * Convert the double-precision floating-point value \p x to a
 * signed 64-bit integer value in round-down (to negative infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ long long int __double2ll_rd(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to an unsigned 64-bit int in round-to-nearest-even mode.
 *
 * Convert the double-precision floating-point value \p x to an
 * unsigned 64-bit integer value in round-to-nearest-even mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ unsigned long long int __double2ull_rn(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to an unsigned 64-bit int in round-up mode.
 *
 * Convert the double-precision floating-point value \p x to an
 * unsigned 64-bit integer value in round-up (to positive infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ unsigned long long int __double2ull_ru(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a double to an unsigned 64-bit int in round-down mode.
 *
 * Convert the double-precision floating-point value \p x to an
 * unsigned 64-bit integer value in round-down (to negative infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ unsigned long long int __double2ull_rd(double x);
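/*
 * Illustrative sketch (not part of the original header): the conversion
 * intrinsics above differ from a plain C cast, which always truncates toward
 * zero. The expected values in the comments assume IEEE-754 semantics; the
 * kernel name is hypothetical and the code is meant for compilation with nvcc.
 */
__global__ void int_rounding_demo(double x, int *out)
{
    out[0] = (int)x;              /* C cast truncates:   2.7 -> 2, -2.7 -> -2 */
    out[1] = __double2int_rn(x);  /* nearest-even:       2.7 -> 3,  2.5 -> 2  */
    out[2] = __double2int_ru(x);  /* toward +infinity:   2.1 -> 3             */
    out[3] = __double2int_rd(x);  /* toward -infinity:  -2.1 -> -3            */
}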
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a signed int to a double.
 *
 * Convert the signed integer value \p x to a double-precision floating-point value.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ double __int2double_rn(int x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert an unsigned int to a double.
 *
 * Convert the unsigned integer value \p x to a double-precision floating-point value.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ double __uint2double_rn(unsigned int x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a signed 64-bit int to a double in round-to-nearest-even mode.
 *
 * Convert the signed 64-bit integer value \p x to a double-precision floating-point
 * value in round-to-nearest-even mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ double __ll2double_rn(long long int x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a signed 64-bit int to a double in round-towards-zero mode.
 *
 * Convert the signed 64-bit integer value \p x to a double-precision floating-point
 * value in round-towards-zero mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ double __ll2double_rz(long long int x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a signed 64-bit int to a double in round-up mode.
 *
 * Convert the signed 64-bit integer value \p x to a double-precision floating-point
 * value in round-up (to positive infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ double __ll2double_ru(long long int x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert a signed 64-bit int to a double in round-down mode.
 *
 * Convert the signed 64-bit integer value \p x to a double-precision floating-point
 * value in round-down (to negative infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ double __ll2double_rd(long long int x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert an unsigned 64-bit int to a double in round-to-nearest-even mode.
 *
 * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point
 * value in round-to-nearest-even mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ double __ull2double_rn(unsigned long long int x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert an unsigned 64-bit int to a double in round-towards-zero mode.
 *
 * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point
 * value in round-towards-zero mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ double __ull2double_rz(unsigned long long int x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert an unsigned 64-bit int to a double in round-up mode.
 *
 * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point
 * value in round-up (to positive infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ double __ull2double_ru(unsigned long long int x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Convert an unsigned 64-bit int to a double in round-down mode.
 *
 * Convert the unsigned 64-bit integer value \p x to a double-precision floating-point
 * value in round-down (to negative infinity) mode.
 * \return Returns converted value.
 */
extern __device__ __device_builtin__ double __ull2double_rd(unsigned long long int x);
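/*
 * Illustrative sketch (not part of the original header): a 64-bit integer can
 * carry more precision than the 53-bit double significand, so the conversions
 * above may be inexact; the _rd/_ru variants bracket the exact value. The
 * kernel name is hypothetical; the numeric claims assume IEEE-754 doubles.
 */
__global__ void ull_rounding_demo(double *out)
{
    unsigned long long v = 0xFFFFFFFFFFFFFFFFULL;  /* 2^64 - 1: not exactly representable */
    out[0] = __ull2double_rd(v);  /* greatest double <= v                  */
    out[1] = __ull2double_ru(v);  /* least double >= v (here exactly 2^64) */
}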
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Reinterpret high 32 bits in a double as a signed integer.
 *
 * Reinterpret the high 32 bits in the double-precision floating-point value \p x
 * as a signed integer.
 * \return Returns reinterpreted value.
 */
extern __device__ __device_builtin__ int __double2hiint(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Reinterpret low 32 bits in a double as a signed integer.
 *
 * Reinterpret the low 32 bits in the double-precision floating-point value \p x
 * as a signed integer.
 * \return Returns reinterpreted value.
 */
extern __device__ __device_builtin__ int __double2loint(double x);
/**
 * \ingroup CUDA_MATH_INTRINSIC_CAST
 * \brief Reinterpret high and low 32-bit integer values as a double.
 *
 * Reinterpret the integer value of \p hi as the high 32 bits of a
 * double-precision floating-point value and the integer value of \p lo
 * as the low 32 bits of the same double-precision floating-point value.
 * \return Returns reinterpreted value.
 */
extern __device__ __device_builtin__ double __hiloint2double(int hi, int lo);
}
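/*
 * Illustrative sketch (not part of the original header): the reinterpretation
 * intrinsics above give word-level access to a double without a round trip
 * through memory. Masking the sign bit in the high word is one way to express
 * an absolute value; this is an example only, not how CUDA's libm implements fabs.
 */
static __inline__ __device__ double abs_via_hiloint(double x)
{
    int hi = __double2hiint(x);   /* sign, exponent, upper mantissa bits */
    int lo = __double2loint(x);   /* lower 32 mantissa bits              */
    return __hiloint2double(hi & 0x7FFFFFFF, lo);  /* clear the sign bit */
}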

/*******************************************************************************
*                                                                              *
*                                                                              *
*                                                                              *
*******************************************************************************/

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double fma(double a, double b, double c, enum cudaRoundMode mode);

#undef EXCLUDE_FROM_RTC

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dmul(double a, double b, enum cudaRoundMode mode = cudaRoundNearest);

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dadd(double a, double b, enum cudaRoundMode mode = cudaRoundNearest);

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dsub(double a, double b, enum cudaRoundMode mode = cudaRoundNearest);

__DEVICE_DOUBLE_FUNCTIONS_DECL__ int double2int(double a, enum cudaRoundMode mode = cudaRoundZero);

__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned int double2uint(double a, enum cudaRoundMode mode = cudaRoundZero);

__DEVICE_DOUBLE_FUNCTIONS_DECL__ long long int double2ll(double a, enum cudaRoundMode mode = cudaRoundZero);

__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned long long int double2ull(double a, enum cudaRoundMode mode = cudaRoundZero);

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ll2double(long long int a, enum cudaRoundMode mode = cudaRoundNearest);

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ull2double(unsigned long long int a, enum cudaRoundMode mode = cudaRoundNearest);

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double int2double(int a, enum cudaRoundMode mode = cudaRoundNearest);

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double uint2double(unsigned int a, enum cudaRoundMode mode = cudaRoundNearest);

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double float2double(float a, enum cudaRoundMode mode = cudaRoundNearest);

#undef __DEVICE_DOUBLE_FUNCTIONS_DECL__


#endif /* __cplusplus && __CUDACC__ */

#if !defined(__CUDACC_RTC__)
#include "device_double_functions.hpp"
#endif /* !__CUDACC_RTC__ */

#endif /* !__DEVICE_DOUBLE_FUNCTIONS_H__ */

#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__)
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_H__
#endif
miniCUDA124/include/crt/device_double_functions.hpp
ADDED
@@ -0,0 +1,197 @@
/*
 * Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
 *
 * NOTICE TO LICENSEE:
 *
 * This source code and/or documentation ("Licensed Deliverables") are
 * subject to NVIDIA intellectual property rights under U.S. and
 * international Copyright laws.
 *
 * These Licensed Deliverables contained herein is PROPRIETARY and
 * CONFIDENTIAL to NVIDIA and is being provided under the terms and
 * conditions of a form of NVIDIA software license agreement by and
 * between NVIDIA and Licensee ("License Agreement") or electronically
 * accepted by Licensee. Notwithstanding any terms or conditions to
 * the contrary in the License Agreement, reproduction or disclosure
 * of the Licensed Deliverables to any third party without the express
 * written consent of NVIDIA is prohibited.
 *
 * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
 * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
 * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
 * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
 * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
 * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
 * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
 * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
 * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
 * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
 * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
 * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THESE LICENSED DELIVERABLES.
 *
 * U.S. Government End Users. These Licensed Deliverables are a
 * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
 * 1995), consisting of "commercial computer software" and "commercial
 * computer software documentation" as such terms are used in 48
 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
 * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
 * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
 * U.S. Government End Users acquire the Licensed Deliverables with
 * only those rights set forth herein.
 *
 * Any use of the Licensed Deliverables in individual and commercial
 * software must include, in the user documentation and internal
 * comments to the code, the above Disclaimer and U.S. Government End
 * Users Notice.
 */

#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
#if defined(_MSC_VER)
#pragma message("crt/device_double_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
#else
#warning "crt/device_double_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
#endif
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__
#endif

#if !defined(__DEVICE_DOUBLE_FUNCTIONS_HPP__)
#define __DEVICE_DOUBLE_FUNCTIONS_HPP__

/*******************************************************************************
*                                                                              *
*                                                                              *
*                                                                              *
*******************************************************************************/

#if defined(__cplusplus) && defined(__CUDACC__)

/*******************************************************************************
*                                                                              *
*                                                                              *
*                                                                              *
*******************************************************************************/

#if defined(__CUDACC_RTC__)
#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ __device__
#else
#define __DEVICE_DOUBLE_FUNCTIONS_DECL__ static __inline__ __device__
#endif /* __CUDACC_RTC__ */

#include "builtin_types.h"
#include "device_types.h"
#include "host_defines.h"

/*******************************************************************************
*                                                                              *
*                                                                              *
*                                                                              *
*******************************************************************************/

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double fma(double a, double b, double c, enum cudaRoundMode mode)
{
    return mode == cudaRoundZero   ? __fma_rz(a, b, c) :
           mode == cudaRoundPosInf ? __fma_ru(a, b, c) :
           mode == cudaRoundMinInf ? __fma_rd(a, b, c) :
                                     __fma_rn(a, b, c);
}

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dmul(double a, double b, enum cudaRoundMode mode)
{
    return mode == cudaRoundZero   ? __dmul_rz(a, b) :
           mode == cudaRoundPosInf ? __dmul_ru(a, b) :
           mode == cudaRoundMinInf ? __dmul_rd(a, b) :
                                     __dmul_rn(a, b);
}

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dadd(double a, double b, enum cudaRoundMode mode)
{
    return mode == cudaRoundZero   ? __dadd_rz(a, b) :
           mode == cudaRoundPosInf ? __dadd_ru(a, b) :
           mode == cudaRoundMinInf ? __dadd_rd(a, b) :
                                     __dadd_rn(a, b);
}

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double dsub(double a, double b, enum cudaRoundMode mode)
{
    return mode == cudaRoundZero   ? __dsub_rz(a, b) :
           mode == cudaRoundPosInf ? __dsub_ru(a, b) :
           mode == cudaRoundMinInf ? __dsub_rd(a, b) :
                                     __dsub_rn(a, b);
}

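/*
 * Illustrative sketch (not part of the original header): the mode argument of
 * the wrappers above is normally a compile-time constant, so the conditional
 * chain typically folds away and each call lowers to a single intrinsic. This
 * assumes the wrappers are visible via cuda_runtime.h; the kernel name is
 * hypothetical.
 */
__global__ void fma_rz_demo(const double *a, const double *b, double *out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    /* cudaRoundZero is a constant here, so this should reduce to __fma_rz */
    out[i] = fma(a[i], b[i], 1.0, cudaRoundZero);
}
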
__DEVICE_DOUBLE_FUNCTIONS_DECL__ int double2int(double a, enum cudaRoundMode mode)
{
    return mode == cudaRoundNearest ? __double2int_rn(a) :
           mode == cudaRoundPosInf  ? __double2int_ru(a) :
           mode == cudaRoundMinInf  ? __double2int_rd(a) :
                                      __double2int_rz(a);
}

__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned int double2uint(double a, enum cudaRoundMode mode)
{
    return mode == cudaRoundNearest ? __double2uint_rn(a) :
           mode == cudaRoundPosInf  ? __double2uint_ru(a) :
           mode == cudaRoundMinInf  ? __double2uint_rd(a) :
                                      __double2uint_rz(a);
}

__DEVICE_DOUBLE_FUNCTIONS_DECL__ long long int double2ll(double a, enum cudaRoundMode mode)
{
    return mode == cudaRoundNearest ? __double2ll_rn(a) :
           mode == cudaRoundPosInf  ? __double2ll_ru(a) :
           mode == cudaRoundMinInf  ? __double2ll_rd(a) :
                                      __double2ll_rz(a);
}

__DEVICE_DOUBLE_FUNCTIONS_DECL__ unsigned long long int double2ull(double a, enum cudaRoundMode mode)
{
    return mode == cudaRoundNearest ? __double2ull_rn(a) :
           mode == cudaRoundPosInf  ? __double2ull_ru(a) :
           mode == cudaRoundMinInf  ? __double2ull_rd(a) :
                                      __double2ull_rz(a);
}

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ll2double(long long int a, enum cudaRoundMode mode)
{
    return mode == cudaRoundZero   ? __ll2double_rz(a) :
           mode == cudaRoundPosInf ? __ll2double_ru(a) :
           mode == cudaRoundMinInf ? __ll2double_rd(a) :
                                     __ll2double_rn(a);
}

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double ull2double(unsigned long long int a, enum cudaRoundMode mode)
{
    return mode == cudaRoundZero   ? __ull2double_rz(a) :
           mode == cudaRoundPosInf ? __ull2double_ru(a) :
           mode == cudaRoundMinInf ? __ull2double_rd(a) :
                                     __ull2double_rn(a);
}

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double int2double(int a, enum cudaRoundMode mode)
{
    return (double)a;
}

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double uint2double(unsigned int a, enum cudaRoundMode mode)
{
    return (double)a;
}

__DEVICE_DOUBLE_FUNCTIONS_DECL__ double float2double(float a, enum cudaRoundMode mode)
{
    return (double)a;
}

#undef __DEVICE_DOUBLE_FUNCTIONS_DECL__

#endif /* __cplusplus && __CUDACC__ */

#endif /* !__DEVICE_DOUBLE_FUNCTIONS_HPP__ */

#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__)
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_DOUBLE_FUNCTIONS_HPP__
#endif
miniCUDA124/include/crt/device_functions.h
ADDED
The diff for this file is too large to render.
See raw diff

miniCUDA124/include/crt/device_functions.hpp
ADDED
@@ -0,0 +1,1197 @@
/*
 * Copyright 1993-2022 NVIDIA Corporation. All rights reserved.
 *
 * NOTICE TO LICENSEE:
 *
 * This source code and/or documentation ("Licensed Deliverables") are
 * subject to NVIDIA intellectual property rights under U.S. and
 * international Copyright laws.
 *
 * These Licensed Deliverables contained herein is PROPRIETARY and
 * CONFIDENTIAL to NVIDIA and is being provided under the terms and
 * conditions of a form of NVIDIA software license agreement by and
 * between NVIDIA and Licensee ("License Agreement") or electronically
 * accepted by Licensee. Notwithstanding any terms or conditions to
 * the contrary in the License Agreement, reproduction or disclosure
 * of the Licensed Deliverables to any third party without the express
 * written consent of NVIDIA is prohibited.
 *
 * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
 * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
 * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
 * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
 * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
 * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
 * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
 * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
 * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
 * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
 * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
 * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THESE LICENSED DELIVERABLES.
 *
 * U.S. Government End Users. These Licensed Deliverables are a
 * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
 * 1995), consisting of "commercial computer software" and "commercial
 * computer software documentation" as such terms are used in 48
 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
 * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
 * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
 * U.S. Government End Users acquire the Licensed Deliverables with
 * only those rights set forth herein.
 *
 * Any use of the Licensed Deliverables in individual and commercial
 * software must include, in the user documentation and internal
 * comments to the code, the above Disclaimer and U.S. Government End
 * Users Notice.
 */

#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
#if defined(_MSC_VER)
#pragma message("crt/device_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
#else
#warning "crt/device_functions.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
#endif
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__
#endif

#if !defined(__DEVICE_FUNCTIONS_HPP__)
#define __DEVICE_FUNCTIONS_HPP__

/*******************************************************************************
*                                                                              *
*                                                                              *
*                                                                              *
*******************************************************************************/

#if defined(__cplusplus) && defined(__CUDACC__)

#if defined(__CUDACC_RTC__)
#define __DEVICE_FUNCTIONS_DECL__ __device__
#define __DEVICE_FUNCTIONS_STATIC_DECL__ __device__
#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ __device__ __host__ __cudart_builtin__
#else
#define __DEVICE_FUNCTIONS_DECL__ __device__
#define __DEVICE_FUNCTIONS_STATIC_DECL__ static __inline__ __device__
#define __DEVICE_HOST_FUNCTIONS_STATIC_DECL__ static __inline__ __device__ __host__ __cudart_builtin__
#endif /* __CUDACC_RTC__ */

#include "builtin_types.h"
#include "device_types.h"
#include "host_defines.h"

#undef __DEVICE_FUNCTIONS_DECL__
#undef __DEVICE_FUNCTIONS_STATIC_DECL__

#endif /* __cplusplus && __CUDACC__ */

/*******************************************************************************
*                                                                              *
*                                                                              *
*                                                                              *
*******************************************************************************/

#ifdef __CUDACC__
# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900)
#define __CUDA_AND_AT_LEAST_SM_90__
#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) */
# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
#define __CUDA_AND_AT_LEAST_SM_70__
#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700) */
# if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750)
#define __CUDA_AND_AT_LEAST_SM_75__
#endif /* defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) */
#endif /* __CUDACC__ */

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax_s32_relu(const int a, const int b){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    int res;
    asm("{max.s32.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b));
    return res;
#else
    // Host and older-architecture code
    int ans = max(a, b);

    return (ans > 0) ? ans : 0;
#endif
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax_s16x2_relu(const unsigned int a, const unsigned int b){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm("{max.s16x2.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b));
#elif defined(__CUDA_ARCH__)
    res = __vmaxs2(__vmaxs2(a, b), 0U);
#else
    // Host and older-architecture code
    // Separate out the high and low halves:
    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
    unsigned short aU_hi = (unsigned short)(a >> 16);

    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
    unsigned short bU_hi = (unsigned short)(b >> 16);

    // Cast to signed:
    short aS_lo = *(short*)& aU_lo;
    short aS_hi = *(short*)& aU_hi;

    short bS_lo = *(short*)& bU_lo;
    short bS_hi = *(short*)& bU_hi;

    // Get the answer per lane:
    short ansS_lo = (short)max(aS_lo, bS_lo);
    short ansS_hi = (short)max(aS_hi, bS_hi);

    // relu
    if(ansS_lo < 0){ ansS_lo = 0; }
    if(ansS_hi < 0){ ansS_hi = 0; }

    // Cast back to unsigned:
    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;

    // Put the answer back together:
    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
#endif

    return res;
}

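/*
 * Illustrative sketch (not part of the original header): the s16x2 variants
 * treat one 32-bit word as two packed signed 16-bit lanes. Because the
 * functions above are declared __device__ __host__, the host fallback can be
 * exercised directly when the file is compiled with nvcc. pack_s16x2 and
 * s16x2_relu_demo are hypothetical helpers, not part of the CUDA API.
 */
static __inline__ __device__ __host__ unsigned int pack_s16x2(short lo, short hi)
{
    return ((unsigned int)(unsigned short)lo) |
           (((unsigned int)(unsigned short)hi) << 16);
}

static __inline__ __device__ __host__ unsigned int s16x2_relu_demo(void)
{
    unsigned int a = pack_s16x2(3, -4);
    unsigned int b = pack_s16x2(-7, -2);
    /* low lane: max(3, -7) = 3; high lane: max(-4, -2) = -2, clamped to 0 */
    return __vimax_s16x2_relu(a, b);   /* == pack_s16x2(3, 0) */
}
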
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin_s32_relu(const int a, const int b){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    int res;
    asm("{min.s32.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b));
    return res;
#else
    // Host and older-architecture code
    int ans = min(a, b);

    return (ans > 0) ? ans : 0;
#endif
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin_s16x2_relu(const unsigned int a, const unsigned int b){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm("{min.s16x2.relu %0, %1, %2;}" : "=r"(res) : "r"(a), "r"(b));
#elif defined(__CUDA_ARCH__)
    res = __vmaxs2(__vmins2(a, b), 0U);
#else
    // Host and older-architecture code
    // Separate out the high and low halves:
    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
    unsigned short aU_hi = (unsigned short)(a >> 16);

    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
    unsigned short bU_hi = (unsigned short)(b >> 16);

    // Cast to signed:
    short aS_lo = *(short*)& aU_lo;
    short aS_hi = *(short*)& aU_hi;

    short bS_lo = *(short*)& bU_lo;
    short bS_hi = *(short*)& bU_hi;

    // Get the answer per lane:
    short ansS_lo = (short)min(aS_lo, bS_lo);
    short ansS_hi = (short)min(aS_hi, bS_hi);

    // relu
    if(ansS_lo < 0){ ansS_lo = 0; }
    if(ansS_hi < 0){ ansS_hi = 0; }

    // Cast back to unsigned:
    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;

    // Put the answer back together:
    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
#endif

    return res;
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax3_s32(const int a, const int b, const int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    int res;
    asm ("{.reg .s32 t1; \n\t"
         "max.s32 t1, %1, %2; \n\t"
         "max.s32 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host and older-architecture code
    return max(max(a, b), c);
#endif
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    // Future asm code (naming/syntax may change):
    asm ("{.reg .b32 t1; \n\t"
         "max.s16x2 t1, %1, %2; \n\t"
         "max.s16x2 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_AND_AT_LEAST_SM_70__)
    res = __vmaxs2(__vmaxs2(a, b), c);
#else
    // Host and older-architecture code
    // Separate out the high and low halves:
    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
    unsigned short aU_hi = (unsigned short)(a >> 16);

    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
    unsigned short bU_hi = (unsigned short)(b >> 16);

    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
    unsigned short cU_hi = (unsigned short)(c >> 16);

    // Cast to signed:
    short aS_lo = *(short*)& aU_lo;
    short aS_hi = *(short*)& aU_hi;

    short bS_lo = *(short*)& bU_lo;
    short bS_hi = *(short*)& bU_hi;

    short cS_lo = *(short*)& cU_lo;
    short cS_hi = *(short*)& cU_hi;

    // Get the answer per lane:
    short ansS_lo = (short)max(max(aS_lo, bS_lo), cS_lo);
    short ansS_hi = (short)max(max(aS_hi, bS_hi), cS_hi);

    // Cast back to unsigned:
    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;

    // Put the answer back together:
    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
#endif
    return res;
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u32(const unsigned int a, const unsigned int b, const unsigned int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    unsigned int res;
    asm ("{.reg .u32 t1; \n\t"
         "max.u32 t1, %1, %2; \n\t"
         "max.u32 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host and older-architecture code
    return max(max(a, b), c);
#endif
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm ("{.reg .b32 t1; \n\t"
         "max.u16x2 t1, %1, %2; \n\t"
         "max.u16x2 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_ARCH__)
    res = __vmaxu2(__vmaxu2(a, b), c);
#else
    // Host and older-architecture code
    // Separate out the high and low halves:
    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
    unsigned short aU_hi = (unsigned short)(a >> 16);

    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
    unsigned short bU_hi = (unsigned short)(b >> 16);

    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
    unsigned short cU_hi = (unsigned short)(c >> 16);

    // Get the answer per lane:
    unsigned short ansU_lo = (unsigned short)max(max(aU_lo, bU_lo), cU_lo);
    unsigned short ansU_hi = (unsigned short)max(max(aU_hi, bU_hi), cU_hi);

    // Put the answer back together:
    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
#endif

    return res;
}

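/*
 * Illustrative sketch (not part of the original header): a fused three-way
 * elementwise maximum. On sm_90 the call maps to the inline PTX sequence
 * above; on the host and on older devices it falls back to max(max(a, b), c).
 * The kernel and buffer names are hypothetical.
 */
__global__ void max3_demo(const int *a, const int *b, const int *c, int *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        out[i] = __vimax3_s32(a[i], b[i], c[i]);
    }
}
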
__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32(const int a, const int b, const int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    int res;
    asm ("{.reg .s32 t1; \n\t"
         "min.s32 t1, %1, %2; \n\t"
         "min.s32 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host and older-architecture code
    return min(min(a, b), c);
#endif
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm ("{.reg .b32 t1; \n\t"
         "min.s16x2 t1, %1, %2; \n\t"
         "min.s16x2 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_AND_AT_LEAST_SM_70__)
    res = __vmins2(__vmins2(a, b), c);
#else
    // Host and older-architecture code
    // Separate out the high and low halves:
    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
    unsigned short aU_hi = (unsigned short)(a >> 16);

    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
    unsigned short bU_hi = (unsigned short)(b >> 16);

    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
    unsigned short cU_hi = (unsigned short)(c >> 16);

    // Cast to signed:
    short aS_lo = *(short*)& aU_lo;
    short aS_hi = *(short*)& aU_hi;

    short bS_lo = *(short*)& bU_lo;
    short bS_hi = *(short*)& bU_hi;

    short cS_lo = *(short*)& cU_lo;
    short cS_hi = *(short*)& cU_hi;

    // Get the answer per lane:
    short ansS_lo = (short)min(min(aS_lo, bS_lo), cS_lo);
    short ansS_hi = (short)min(min(aS_hi, bS_hi), cS_hi);

    // Cast back to unsigned:
    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;

    // Put the answer back together:
    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
#endif

    return res;
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u32(const unsigned int a, const unsigned int b, const unsigned int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    unsigned int res;
    asm ("{.reg .u32 t1; \n\t"
         "min.u32 t1, %1, %2; \n\t"
         "min.u32 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host and older-architecture code
    return min(min(a, b), c);
#endif
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm ("{.reg .b32 t1; \n\t"
         "min.u16x2 t1, %1, %2; \n\t"
         "min.u16x2 %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_ARCH__)
    res = __vminu2(__vminu2(a, b), c);
#else
    // Host and older-architecture code
    // Separate out the high and low halves:
    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
    unsigned short aU_hi = (unsigned short)(a >> 16);

    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
    unsigned short bU_hi = (unsigned short)(b >> 16);

    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
    unsigned short cU_hi = (unsigned short)(c >> 16);

    // Get the answer per lane:
    unsigned short ansU_lo = (unsigned short)min(min(aU_lo, bU_lo), cU_lo);
    unsigned short ansU_hi = (unsigned short)min(min(aU_hi, bU_hi), cU_hi);

    // Put the answer back together:
    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
#endif

    return res;
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimax3_s32_relu(const int a, const int b, const int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    int res;
    asm ("{.reg .s32 t1; \n\t"
         "max.s32.relu t1, %1, %2; \n\t"
         "max.s32.relu %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host and older-architecture code
    int ans = max(max(a, b), c);

    return (ans > 0) ? ans : 0;
#endif
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimax3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm ("{.reg .b32 t1; \n\t"
         "max.s16x2.relu t1, %1, %2; \n\t"
         "max.s16x2.relu %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_AND_AT_LEAST_SM_75__)
    res = __vimax_s16x2_relu(__vmaxs2(a, b), c);
#else
    // Host and older-architecture code
    // Separate out the high and low halves:
    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
    unsigned short aU_hi = (unsigned short)(a >> 16);

    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
    unsigned short bU_hi = (unsigned short)(b >> 16);

    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
    unsigned short cU_hi = (unsigned short)(c >> 16);

    // Cast to signed:
    short aS_lo = *(short*)& aU_lo;
    short aS_hi = *(short*)& aU_hi;

    short bS_lo = *(short*)& bU_lo;
    short bS_hi = *(short*)& bU_hi;

    short cS_lo = *(short*)& cU_lo;
    short cS_hi = *(short*)& cU_hi;

    // Get the answer per lane:
    short ansS_lo = (short)max(max(aS_lo, bS_lo), cS_lo);
    short ansS_hi = (short)max(max(aS_hi, bS_hi), cS_hi);

    // relu
    if(ansS_lo < 0){ansS_lo = 0;}
    if(ansS_hi < 0){ansS_hi = 0;}

    // Cast back to unsigned:
    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;

    // Put the answer back together:
    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
#endif

    return res;
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vimin3_s32_relu(const int a, const int b, const int c){
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    int res;
    asm ("{.reg .s32 t1; \n\t"
         "min.s32.relu t1, %1, %2; \n\t"
         "min.s32.relu %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
    return res;
#else
    // Host and older-architecture code
    int ans = min(min(a, b), c);

    return (ans > 0) ? ans : 0;
#endif
}

__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vimin3_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){
    unsigned int res;
#ifdef __CUDA_AND_AT_LEAST_SM_90__
    asm ("{.reg .b32 t1; \n\t"
         "min.s16x2.relu t1, %1, %2; \n\t"
         "min.s16x2.relu %0, t1, %3;}\n\t"
         : "=r"(res) : "r"(a), "r"(b), "r"(c));
#elif defined(__CUDA_AND_AT_LEAST_SM_75__)
    res = __vimin_s16x2_relu(__vmins2(a, b), c);
#else
    // Host and older-architecture code
    // Separate out the high and low halves:
    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
    unsigned short aU_hi = (unsigned short)(a >> 16);

    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
    unsigned short bU_hi = (unsigned short)(b >> 16);

    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
    unsigned short cU_hi = (unsigned short)(c >> 16);

    // Cast to signed:
    short aS_lo = *(short*)& aU_lo;
    short aS_hi = *(short*)& aU_hi;

    short bS_lo = *(short*)& bU_lo;
    short bS_hi = *(short*)& bU_hi;

    short cS_lo = *(short*)& cU_lo;
    short cS_hi = *(short*)& cU_hi;

    // Get the answer per lane:
    short ansS_lo = (short)min(min(aS_lo, bS_lo), cS_lo);
    short ansS_hi = (short)min(min(aS_hi, bS_hi), cS_hi);

    // relu
    if(ansS_lo < 0){ansS_lo = 0;}
    if(ansS_hi < 0){ansS_hi = 0;}

    // Cast back to unsigned:
    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;

    // Put the answer back together:
+
res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
|
| 555 |
+
#endif
|
| 556 |
+
|
| 557 |
+
return res;
|
| 558 |
+
}
|
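The `.relu` variants above fold a clamp-at-zero into the three-way extremum, so `__vimin3_s32_relu(a, b, c)` returns `max(min(a, min(b, c)), 0)` on every path. A minimal host-side sketch of that contract (plain C++; the helper name and sample values are illustrative, not part of the header):

    #include <algorithm>
    #include <cassert>

    // Reference semantics of the host fallback above:
    // three-way minimum first, then the relu clamp.
    static int vimin3_s32_relu_ref(int a, int b, int c) {
        int ans = std::min(std::min(a, b), c);
        return (ans > 0) ? ans : 0;  // relu step
    }

    int main() {
        assert(vimin3_s32_relu_ref(5, 9, 7) == 5);   // all positive: plain min3
        assert(vimin3_s32_relu_ref(5, -9, 7) == 0);  // negative minimum clamps to 0
        return 0;
    }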
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32(const int a, const int b, const int c){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    int res;
+    asm ("{.reg .s32 t1; \n\t"
+         "add.s32 t1, %1, %2; \n\t"
+         "max.s32 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+    return res;
+#else
+    // Host and older architecture code
+    return max(a + b, c);
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){
+    unsigned int res;
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    asm ("{.reg .b32 t1; \n\t"
+         "add.s16x2 t1, %1, %2; \n\t"
+         "max.s16x2 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+#elif defined(__CUDA_ARCH__)
+    res = __vmaxs2(__vadd2(a, b), c);
+#else
+    // Host and older architecture code
+    // Separate out the high and low halves:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
+    unsigned short cU_hi = (unsigned short)(c >> 16);
+
+    // Cast to signed:
+    short aS_lo = *(short*)& aU_lo;
+    short aS_hi = *(short*)& aU_hi;
+
+    short bS_lo = *(short*)& bU_lo;
+    short bS_hi = *(short*)& bU_hi;
+
+    short cS_lo = *(short*)& cU_lo;
+    short cS_hi = *(short*)& cU_hi;
+
+    // Get answer
+    short ansS_lo = (short)max((short)(aS_lo + bS_lo), cS_lo);
+    short ansS_hi = (short)max((short)(aS_hi + bS_hi), cS_hi);
+
+    // Cast back to unsigned:
+    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
+    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
+
+    // Put answer back together:
+    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+#endif
+
+    return res;
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u32(const unsigned int a, const unsigned int b, const unsigned int c){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    unsigned int res;
+    asm ("{.reg .u32 t1; \n\t"
+         "add.u32 t1, %1, %2; \n\t"
+         "max.u32 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+    return res;
+#else
+    // Host and older architecture code
+    return max(a + b, c);
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){
+    unsigned int res;
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    asm ("{.reg .b32 t1; \n\t"
+         "add.u16x2 t1, %1, %2; \n\t"
+         "max.u16x2 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+#elif defined(__CUDA_ARCH__)
+    res = __vmaxu2(__vadd2(a, b), c);
+#else
+    // Host and older architecture code
+    // Separate out the high and low halves:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
+    unsigned short cU_hi = (unsigned short)(c >> 16);
+
+    // Get answer
+    unsigned short ansU_lo = (unsigned short)max((unsigned short)(aU_lo + bU_lo), cU_lo);
+    unsigned short ansU_hi = (unsigned short)max((unsigned short)(aU_hi + bU_hi), cU_hi);
+
+    // Put answer back together:
+    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+#endif
+
+    return res;
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32(const int a, const int b, const int c){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    int res;
+    asm ("{.reg .s32 t1; \n\t"
+         "add.s32 t1, %1, %2; \n\t"
+         "min.s32 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+    return res;
+#else
+    // Host and older architecture code
+    return min(a + b, c);
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2(const unsigned int a, const unsigned int b, const unsigned int c){
+    unsigned int res;
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    asm ("{.reg .b32 t1; \n\t"
+         "add.s16x2 t1, %1, %2; \n\t"
+         "min.s16x2 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+#elif defined(__CUDA_ARCH__)
+    res = __vmins2(__vadd2(a, b), c);
+#else
+    // Host and older architecture code
+    // Separate out the high and low halves:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
+    unsigned short cU_hi = (unsigned short)(c >> 16);
+
+    // Cast to signed:
+    short aS_lo = *(short*)& aU_lo;
+    short aS_hi = *(short*)& aU_hi;
+
+    short bS_lo = *(short*)& bU_lo;
+    short bS_hi = *(short*)& bU_hi;
+
+    short cS_lo = *(short*)& cU_lo;
+    short cS_hi = *(short*)& cU_hi;
+
+    // Get answer
+    short ansS_lo = (short)min((short)(aS_lo + bS_lo), cS_lo);
+    short ansS_hi = (short)min((short)(aS_hi + bS_hi), cS_hi);
+
+    // Cast back to unsigned:
+    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
+    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
+
+    // Put answer back together:
+    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+#endif
+
+    return res;
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u32(const unsigned int a, const unsigned int b, const unsigned int c){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    unsigned int res;
+    asm ("{.reg .u32 t1; \n\t"
+         "add.u32 t1, %1, %2; \n\t"
+         "min.u32 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+    return res;
+#else
+    // Host and older architecture code
+    return min(a + b, c);
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_u16x2(const unsigned int a, const unsigned int b, const unsigned int c){
+    unsigned int res;
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    asm ("{.reg .b32 t1; \n\t"
+         "add.u16x2 t1, %1, %2; \n\t"
+         "min.u16x2 %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+#elif defined(__CUDA_ARCH__)
+    res = __vminu2(__vadd2(a, b), c);
+#else
+    // Host and older architecture code
+    // Separate out the high and low halves:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
+    unsigned short cU_hi = (unsigned short)(c >> 16);
+
+    // Get answer
+    unsigned short ansU_lo = (unsigned short)min((unsigned short)(aU_lo + bU_lo), cU_lo);
+    unsigned short ansU_hi = (unsigned short)min((unsigned short)(aU_hi + bU_hi), cU_hi);
+
+    // Put answer back together:
+    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+#endif
+
+    return res;
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmax_s32_relu(const int a, const int b, const int c){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    int res;
+    asm ("{.reg .s32 t1; \n\t"
+         "add.s32 t1, %1, %2; \n\t"
+         "max.s32.relu %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+    return res;
+#else
+    // Host and older architecture code
+    int ans = max(a + b, c);
+
+    return (ans > 0) ? ans : 0;
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmax_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){
+    unsigned int res;
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    asm ("{.reg .b32 t1; \n\t"
+         "add.s16x2 t1, %1, %2; \n\t"
+         "max.s16x2.relu %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+#elif defined(__CUDA_ARCH__)
+    res = __vimax_s16x2_relu(__vadd2(a, b), c);
+#else
+    // Host and older architecture code
+    // Separate out the high and low halves:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
+    unsigned short cU_hi = (unsigned short)(c >> 16);
+
+    // Cast to signed:
+    short aS_lo = *(short*)& aU_lo;
+    short aS_hi = *(short*)& aU_hi;
+
+    short bS_lo = *(short*)& bU_lo;
+    short bS_hi = *(short*)& bU_hi;
+
+    short cS_lo = *(short*)& cU_lo;
+    short cS_hi = *(short*)& cU_hi;
+
+    // Get answer
+    short ansS_lo = (short)max((short)(aS_lo + bS_lo), cS_lo);
+    short ansS_hi = (short)max((short)(aS_hi + bS_hi), cS_hi);
+
+    if(ansS_lo < 0){ansS_lo = 0;}
+    if(ansS_hi < 0){ansS_hi = 0;}
+
+    // Cast back to unsigned:
+    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
+    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
+
+    // Put answer back together:
+    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+#endif
+
+    return res;
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __viaddmin_s32_relu(const int a, const int b, const int c){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    int res;
+    asm ("{.reg .s32 t1; \n\t"
+         "add.s32 t1, %1, %2; \n\t"
+         "min.s32.relu %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+    return res;
+#else
+    // Host and older architecture code
+    int ans = min(a + b, c);
+
+    return (ans > 0) ? ans : 0;
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __viaddmin_s16x2_relu(const unsigned int a, const unsigned int b, const unsigned int c){
+    unsigned int res;
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    asm ("{.reg .b32 t1; \n\t"
+         "add.s16x2 t1, %1, %2; \n\t"
+         "min.s16x2.relu %0, t1, %3;}\n\t"
+         : "=r"(res) : "r"(a), "r"(b), "r"(c));
+#elif defined(__CUDA_ARCH__)
+    res = __vimin_s16x2_relu(__vadd2(a, b), c);
+#else
+    // Host and older architecture code
+    // Separate out the high and low halves:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    unsigned short cU_lo = (unsigned short)(c & 0xFFFFU);
+    unsigned short cU_hi = (unsigned short)(c >> 16);
+
+    // Cast to signed:
+    short aS_lo = *(short*)& aU_lo;
+    short aS_hi = *(short*)& aU_hi;
+
+    short bS_lo = *(short*)& bU_lo;
+    short bS_hi = *(short*)& bU_hi;
+
+    short cS_lo = *(short*)& cU_lo;
+    short cS_hi = *(short*)& cU_hi;
+
+    // Get answer
+    short ansS_lo = (short)min((short)(aS_lo + bS_lo), cS_lo);
+    short ansS_hi = (short)min((short)(aS_hi + bS_hi), cS_hi);
+
+    if(ansS_lo < 0){ansS_lo = 0;}
+    if(ansS_hi < 0){ansS_hi = 0;}
+
+    // Cast back to unsigned:
+    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
+    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
+
+    // Put answer back together:
+    res = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+#endif
+
+    return res;
+}
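In the s16x2/u16x2 host fallbacks above, the narrowing cast of each lane sum back to a 16-bit type is what reproduces the hardware behavior: the packed PTX adds wrap each lane modulo 2^16 before the max/min is applied. A small sketch of one signed lane (plain C++; the function name and sample values are illustrative, not part of the header):

    #include <cassert>
    #include <cstdint>

    // One lane of the add-then-max fallback: the cast to int16_t
    // wraps the sum modulo 2^16, matching the packed hardware add
    // (well-defined in C++20; in practice wraps on earlier dialects too).
    static int16_t addmax_lane(int16_t a, int16_t b, int16_t c) {
        int16_t sum = (int16_t)(a + b);
        return (sum > c) ? sum : c;
    }

    int main() {
        assert(addmax_lane(3, 4, 10) == 10);        // c dominates the sum
        assert(addmax_lane(30000, 10000, 0) == 0);  // 40000 wraps to -25536, max with 0
        return 0;
    }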
+
+// vimax vimin with predicate
+// *pred gets set to '(a >= b)'
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmax_s32(const int a, const int b, bool* const pred){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    int val;
+    unsigned int predicate_local;
+    asm ("{ .reg .pred __$temp1;\n\t"
+         " setp.ge.s32 __$temp1, %2, %3;\n\t"
+         " selp.s32 %0, %2, %3, __$temp1;\n\t"
+         " selp.s32 %1, 1, 0, __$temp1;}\n\t"
+         : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b));
+
+    *pred = (bool)predicate_local;
+    return val;
+#else
+    // Host and older architecture code
+    int ans = max(a, b);
+
+    *pred = (a >= b);
+    return ans;
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u32(const unsigned int a, const unsigned int b, bool* const pred){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    unsigned int val;
+    unsigned int predicate_local;
+    asm ("{ .reg .pred __$temp1;\n\t"
+         " setp.ge.u32 __$temp1, %2, %3;\n\t"
+         " selp.u32 %0, %2, %3, __$temp1;\n\t"
+         " selp.u32 %1, 1, 0, __$temp1;}\n\t"
+         : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b));
+
+    *pred = (bool)predicate_local;
+    return val;
+#else
+    // Host and older architecture code
+    unsigned int ans = max(a, b);
+
+    *pred = (a >= b);
+    return ans;
+#endif
+}
+
+// *pred gets set to '(a <= b)'
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ int __vibmin_s32(const int a, const int b, bool* const pred){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    int val;
+    unsigned int predicate_local;
+    asm ("{ .reg .pred __$temp1;\n\t"
+         " setp.le.s32 __$temp1, %2, %3;\n\t"
+         " selp.s32 %0, %2, %3, __$temp1;\n\t"
+         " selp.s32 %1, 1, 0, __$temp1;}\n\t"
+         : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b));
+
+    *pred = (bool)predicate_local;
+    return val;
+#else
+    // Host and older architecture code
+    int ans = min(a, b);
+
+    *pred = (a <= b);
+    return ans;
+#endif
+}
+
+// *pred gets set to '(a <= b)'
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u32(const unsigned int a, const unsigned int b, bool* const pred){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    unsigned int val;
+    unsigned int predicate_local;
+    asm ("{ .reg .pred __$temp1;\n\t"
+         " setp.le.u32 __$temp1, %2, %3;\n\t"
+         " selp.u32 %0, %2, %3, __$temp1;\n\t"
+         " selp.u32 %1, 1, 0, __$temp1;}\n\t"
+         : "=r"(val), "=r"(predicate_local) : "r"(a), "r"(b));
+
+    *pred = (bool)predicate_local;
+    return val;
+#else
+    // Host and older architecture code
+    unsigned int ans = min(a, b);
+
+    *pred = (a <= b);
+    return ans;
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    unsigned int val;
+    unsigned int predicate_local_hi;
+    unsigned int predicate_local_lo;
+    asm ("{.reg .pred pu, pv; \n\t"
+         ".reg .s16 rs0, rs1, rs2, rs3; \n\t"
+         "max.s16x2 %0, %3, %4; \n\t"
+         "mov.b32 {rs0, rs1}, %0; \n\t"
+         "mov.b32 {rs2, rs3}, %3; \n\t"
+         "setp.eq.s16 pv, rs0, rs2; \n\t"
+         "setp.eq.s16 pu, rs1, rs3; \n\t"
+         "selp.b32 %1, 1, 0, pu; \n\t"
+         "selp.b32 %2, 1, 0, pv;} \n\t"
+         : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b));
+
+    *pred_hi = (bool)predicate_local_hi;
+    *pred_lo = (bool)predicate_local_lo;
+    return val;
+#else
+    // Host and older architecture code
+    // Separate out the high and low halves:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    // Cast to signed:
+    short aS_lo = *(short*)& aU_lo;
+    short aS_hi = *(short*)& aU_hi;
+
+    short bS_lo = *(short*)& bU_lo;
+    short bS_hi = *(short*)& bU_hi;
+
+    // Get answer
+    short ansS_lo = (short)max(aS_lo, bS_lo);
+    short ansS_hi = (short)max(aS_hi, bS_hi);
+
+    *pred_hi = (aS_hi >= bS_hi);
+    *pred_lo = (aS_lo >= bS_lo);
+
+    // Cast back to unsigned:
+    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
+    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
+
+    // Put answer back together:
+    unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+
+    return ans;
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmax_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    unsigned int val;
+    unsigned int predicate_local_hi;
+    unsigned int predicate_local_lo;
+    asm ("{.reg .pred pu, pv; \n\t"
+         ".reg .u16 rs0, rs1, rs2, rs3; \n\t"
+         "max.u16x2 %0, %3, %4; \n\t"
+         "mov.b32 {rs0, rs1}, %0; \n\t"
+         "mov.b32 {rs2, rs3}, %3; \n\t"
+         "setp.eq.u16 pv, rs0, rs2; \n\t"
+         "setp.eq.u16 pu, rs1, rs3; \n\t"
+         "selp.b32 %1, 1, 0, pu; \n\t"
+         "selp.b32 %2, 1, 0, pv;} \n\t"
+         : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b));
+
+    *pred_hi = (bool)predicate_local_hi;
+    *pred_lo = (bool)predicate_local_lo;
+    return val;
+#else
+    // Host and older architecture code
+    // Separate out the high and low halves:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    // Get answer
+    unsigned short ansU_lo = (unsigned short)max(aU_lo, bU_lo);
+    unsigned short ansU_hi = (unsigned short)max(aU_hi, bU_hi);
+
+    *pred_hi = (aU_hi >= bU_hi);
+    *pred_lo = (aU_lo >= bU_lo);
+
+    // Put answer back together:
+    unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+
+    return ans;
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_s16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    unsigned int val;
+    unsigned int predicate_local_hi;
+    unsigned int predicate_local_lo;
+    asm ("{.reg .pred pu, pv; \n\t"
+         ".reg .u16 rs0, rs1, rs2, rs3; \n\t"
+         "min.s16x2 %0, %3, %4; \n\t"
+         "mov.b32 {rs0, rs1}, %0; \n\t"
+         "mov.b32 {rs2, rs3}, %3; \n\t"
+         "setp.eq.s16 pv, rs0, rs2; \n\t"
+         "setp.eq.s16 pu, rs1, rs3; \n\t"
+         "selp.b32 %1, 1, 0, pu; \n\t"
+         "selp.b32 %2, 1, 0, pv;} \n\t"
+         : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b));
+
+    *pred_hi = (bool)predicate_local_hi;
+    *pred_lo = (bool)predicate_local_lo;
+    return val;
+#else
+    // Host and older architecture code
+    // Separate out the high and low halves:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    // Cast to signed:
+    short aS_lo = *(short*)& aU_lo;
+    short aS_hi = *(short*)& aU_hi;
+
+    short bS_lo = *(short*)& bU_lo;
+    short bS_hi = *(short*)& bU_hi;
+
+    // Get answer
+    short ansS_lo = (short)min(aS_lo, bS_lo);
+    short ansS_hi = (short)min(aS_hi, bS_hi);
+
+    *pred_hi = (aS_hi <= bS_hi);
+    *pred_lo = (aS_lo <= bS_lo);
+
+    // Cast back to unsigned:
+    unsigned short ansU_lo = *(unsigned short*)& ansS_lo;
+    unsigned short ansU_hi = *(unsigned short*)& ansS_hi;
+
+    // Put answer back together:
+    unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+
+    return ans;
+#endif
+}
+
+__DEVICE_HOST_FUNCTIONS_STATIC_DECL__ unsigned int __vibmin_u16x2(const unsigned int a, const unsigned int b, bool* const pred_hi, bool* const pred_lo){
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+    unsigned int val;
+    unsigned int predicate_local_hi;
+    unsigned int predicate_local_lo;
+    asm ("{.reg .pred pu, pv; \n\t"
+         ".reg .u16 rs0, rs1, rs2, rs3; \n\t"
+         "min.u16x2 %0, %3, %4; \n\t"
+         "mov.b32 {rs0, rs1}, %0; \n\t"
+         "mov.b32 {rs2, rs3}, %3; \n\t"
+         "setp.eq.u16 pv, rs0, rs2; \n\t"
+         "setp.eq.u16 pu, rs1, rs3; \n\t"
+         "selp.b32 %1, 1, 0, pu; \n\t"
+         "selp.b32 %2, 1, 0, pv;} \n\t"
+         : "=r"(val), "=r"(predicate_local_hi),"=r"(predicate_local_lo) : "r"(a), "r"(b));
+
+    *pred_hi = (bool)predicate_local_hi;
+    *pred_lo = (bool)predicate_local_lo;
+    return val;
+#else
+    // Host and older architecture code
+    // Separate out the high and low halves:
+    unsigned short aU_lo = (unsigned short)(a & 0xFFFFU);
+    unsigned short aU_hi = (unsigned short)(a >> 16);
+
+    unsigned short bU_lo = (unsigned short)(b & 0xFFFFU);
+    unsigned short bU_hi = (unsigned short)(b >> 16);
+
+    // Get answer
+    unsigned short ansU_lo = (unsigned short)min(aU_lo, bU_lo);
+    unsigned short ansU_hi = (unsigned short)min(aU_hi, bU_hi);
+
+    *pred_hi = (aU_hi <= bU_hi);
+    *pred_lo = (aU_lo <= bU_lo);
+
+    // Put answer back together:
+    unsigned int ans = ((unsigned int) ansU_lo) | (((unsigned int) ansU_hi) << 16);
+
+    return ans;
+#endif
+}
+
+#ifdef __CUDA_AND_AT_LEAST_SM_90__
+#undef __CUDA_AND_AT_LEAST_SM_90__
+#endif
+
+#undef __DEVICE_HOST_FUNCTIONS_STATIC_DECL__
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#endif /* !__DEVICE_FUNCTIONS_HPP__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_DEVICE_FUNCTIONS_HPP__
+#endif
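A minimal usage sketch for the intrinsics defined in this file (the kernel and buffer names are hypothetical; it assumes the header is reached through cuda_runtime.h as usual, and that the single-instruction path applies only when compiling for sm_90 or newer, with older targets and host code taking the fallback paths shown above):

    #include <cuda_runtime.h>

    // Three-way unsigned minimum per element, one thread per index.
    __global__ void min3_kernel(const unsigned int* a, const unsigned int* b,
                                const unsigned int* c, unsigned int* out, int n) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) {
            out[i] = __vimin3_u32(a[i], b[i], c[i]);
        }
    }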
miniCUDA124/include/crt/func_macro.h
ADDED
@@ -0,0 +1,57 @@
+/*
+ * NVIDIA_COPYRIGHT_BEGIN
+ *
+ * Copyright (c) 2008-2018, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto.  Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ *
+ * NVIDIA_COPYRIGHT_END
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/func_macro.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/func_macro.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__
+#endif
+
+#if !defined(__FUNC_MACRO_H__)
+#define __FUNC_MACRO_H__
+
+#if !defined(__CUDA_INTERNAL_COMPILATION__)
+
+#error -- incorrect inclusion of a cudart header file
+
+#endif /* !__CUDA_INTERNAL_COMPILATION__ */
+
+#if defined(__GNUC__)
+
+#define __func__(decl) \
+        inline decl
+
+#define __device_func__(decl) \
+        static __attribute__((__unused__)) decl
+
+#elif defined(_WIN32)
+
+#define __func__(decl) \
+        static inline decl
+
+#define __device_func__(decl) \
+        static decl
+
+#endif /* __GNUC__ */
+
+#endif /* __FUNC_MACRO_H__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_FUNC_MACRO_H__
+#endif
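The macros in func_macro.h take an entire declarator as their argument, so the per-compiler linkage and attribute prefix can be swapped in one place. An illustrative expansion under GCC (clamp01 is a made-up helper; the header itself is internal and must not be included directly):

    /* Written in a cudart source file: */
    __device_func__(int clamp01(int x))
    {
        return (x < 0) ? 0 : ((x > 1) ? 1 : x);
    }

    /* expands, per the GCC branch above, to a file-local helper that
       will not trigger unused-function warnings: */
    static __attribute__((__unused__)) int clamp01(int x)
    {
        return (x < 0) ? 0 : ((x > 1) ? 1 : x);
    }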
miniCUDA124/include/crt/host_config.h
ADDED
@@ -0,0 +1,310 @@
+/*
+ * Copyright 1993-2024 NVIDIA Corporation.  All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee.  Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users.  These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/host_config.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/host_config.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__
+#endif
+
+#if !defined(__HOST_CONFIG_H__)
+#define __HOST_CONFIG_H__
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#if defined(__CUDACC__)
+
+#if defined(__CUDACC_RTC__)
+
+#define _CRTIMP
+#define __THROW
+
+#else /* __CUDACC_RTC__ */
+
+/* check for host compilers that are compatible with nvcc */
+#if !defined(__GNUC__) && !defined(_WIN32)
+
+#error --- !!! UNSUPPORTED COMPILER !!! ---
+
+#endif /* !__GNUC__ && !_WIN32 */
+
+/* check invalid configurations */
+#if defined(__PGIC__)
+#if !defined(__GNUC__) || !defined(__LP64__) || !defined(__linux__)
+#error -- unsupported pgc++ configuration! pgc++ is supported only on Linux x86_64!
+#endif /* !defined(__GNUC__) || !defined(__LP64__) || !defined(__linux__) */
+#endif /* defined(__PGIC__) */
+
+#if defined(__powerpc__)
+#if !defined(__powerpc64__) || !defined(__LITTLE_ENDIAN__)
+#error -- unsupported PPC platform! Only 64-bit little endian PPC is supported!
+#endif /* !__powerpc64__ || !__LITTLE_ENDIAN__ */
+#endif /* __powerpc__ */
+
+#if defined(__APPLE__) && defined(__MACH__) && !defined(__clang__)
+#error -- clang and clang++ are the only supported host compilers on Mac OS X!
+#endif /* __APPLE__ && __MACH__ && !__clang__ */
+
+
+/* check host compiler version */
+#if !__NV_NO_HOST_COMPILER_CHECK
+
+#if defined(__ICC)
+
+#if (__ICC != 1500 && __ICC != 1600 && __ICC != 1700 && __ICC != 1800 && !(__ICC >= 1900 && __ICC <= 2021)) || !defined(__GNUC__) || !defined(__LP64__)
+
+#error -- unsupported ICC configuration! Only ICC 15.0, ICC 16.0, ICC 17.0, ICC 18.0, ICC 19.x and 20.x on Linux x86_64 are supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+
+#endif /* (__ICC != 1500 && __ICC != 1600 && __ICC != 1700 && __ICC != 1800 && __ICC != 1900) || !__GNUC__ || !__LP64__ */
+
+#endif /* __ICC */
+
+#if defined(__GRCO_CLANG_COMPILER__)
+#if (__GRCO_CLANG_COMPILER__ == 1) && ((__clang_major__ < 16) || (__clang_major__ > 17))
+#error -- unsupported Grace clang version! The version must be 16.x to 17.x. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+#endif /* (__GRCO_CLANG_COMPILER__ == 1) && ((__clang_major__ < 16) || (__clang_major__ > 17)) */
+
+#endif /* __GRCO_CLANG_COMPILER__ */
+
+#if defined(__INTEL_CLANG_COMPILER)
+#error -- unsupported Intel ICX compiler! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+#endif /* __INTEL_CLANG_COMPILER */
+
+#if defined(__powerpc__)
+
+#if defined(__ibmxl_vrm__) && !(__ibmxl_vrm__ >= 0x0d010000 && __ibmxl_vrm__ < 0x0d020000) && \
+    !(__ibmxl_vrm__ >= 0x10010000 && __ibmxl_vrm__ < 0x10020000)
+
+#error -- unsupported xlC version! only xlC 13.1 and 16.1 are supported. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+
+#endif /* __ibmxl_vrm__ && !(__ibmxl_vrm__ >= 0x0d010000 && __ibmxl_vrm__ < 0x0d020000) &&
+          !(__ibmxl_vrm__ >= 0x10010000 && __ibmxl_vrm__ < 0x10020000) */
+
+#endif /* __powerpc__ */
+
+#if defined(__GNUC__)
+
+#if __GNUC__ > 13
+
+#error -- unsupported GNU version! gcc versions later than 13 are not supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+
+#endif /* __GNUC__ > 13 */
+
+
+#if defined(__HORIZON__)
+#if (__clang_major__ >= 18) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3))
+#error -- unsupported HOS clang version! The version must be less than 18 and greater than 3.2. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+#endif /* (__clang_major__ >= 18) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3)) */
+#endif /* __HORIZON__ */
+
+#if defined(__clang__) && !defined(__ibmxl_vrm__) && !defined(__ICC) && !defined(__HORIZON__) && !defined(__APPLE__) && !defined(__GRCO_CLANG_COMPILER__)
+
+#if (__clang_major__ >= 18) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3))
+#error -- unsupported clang version! clang version must be less than 18 and greater than 3.2. The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+
+#endif /* (__clang_major__ >= 18) || (__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 3)) */
+
+#endif /* defined(__clang__) && !defined(__ibmxl_vrm__) && !defined(__ICC) && !defined(__HORIZON__) && !defined(__APPLE__) && !defined(__GRCO_CLANG_COMPILER__) */
+
+
+#endif /* __GNUC__ */
+
+#if defined(_WIN32)
+
+#if _MSC_VER < 1910 || _MSC_VER >= 1950
+
+#error -- unsupported Microsoft Visual Studio version! Only the versions between 2017 and 2022 (inclusive) are supported! The nvcc flag '-allow-unsupported-compiler' can be used to override this version check; however, using an unsupported host compiler may cause compilation failure or incorrect run time execution. Use at your own risk.
+
+#elif _MSC_VER >= 1910 && _MSC_VER < 1910
+
+#pragma message("support for this version of Microsoft Visual Studio has been deprecated! Only the versions between 2017 and 2022 (inclusive) are supported!")
+
+#endif /* (_MSC_VER < 1910 || _MSC_VER >= 1950) || (_MSC_VER >= 1910 && _MSC_VER < 1910) */
+
+#endif /* _WIN32 */
+#endif /* !__NV_NO_HOST_COMPILER_CHECK */
+
+
+/* configure host compiler */
+#if defined(__APPLE__)
+
+#define _CRTIMP
+#define _ACRTIMP
+#define __THROW
+
+#if defined(__BLOCKS__) /* nvcc does not support closures */
+
+#undef __BLOCKS__
+
+#endif /* __BLOCKS__ */
+
+#elif defined(__ANDROID__)
+
+#define _CRTIMP
+#define _ACRTIMP
+#define __THROW
+
+#elif defined(__QNX__)
+
+#define _CRTIMP
+#define _ACRTIMP
+#define __THROW
+
+#elif defined(__HORIZON__)
+
+#define _CRTIMP
+#define _ACRTIMP
+#define __THROW
+
+#elif defined(__GNUC__)
+
+#define _CRTIMP
+#define _ACRTIMP
+
+#include <features.h> /* for __THROW */
+
+#elif defined(_WIN32)
+
+#if _MSC_VER >= 1500
+
+#undef _USE_DECLSPECS_FOR_SAL
+#define _USE_DECLSPECS_FOR_SAL \
+        1
+
+#endif /* _MSC_VER >= 1500 */
+
+#if !defined(_CRT_NONSTDC_NO_WARNINGS)
+
+#define _CRT_NONSTDC_NO_WARNINGS /* to suppress warnings */
+
+#endif /* !_CRT_NONSTDC_NO_WARNINGS */
+
+#if !defined(_CRT_SECURE_NO_WARNINGS)
+
+#define _CRT_SECURE_NO_WARNINGS /* to suppress warnings */
+
+#endif /* !_CRT_SECURE_NO_WARNINGS */
+
+#if !defined(NOMINMAX)
+
+#define NOMINMAX /* min and max are part of cuda runtime */
+
+#endif /* !NOMINMAX */
+
+#include <crtdefs.h> /* for _CRTIMP */
+#if _MSC_VER >= 1900
+#include <corecrt.h> /* for _ACRTIMP */
+#endif /* _MSC_VER >= 1900 */
+
+#define __THROW
+
+#endif /* __APPLE__ */
+
+#endif /* __CUDACC_RTC__ */
+
+
+#if defined(__cplusplus) && defined(__CUDA_ARCH__) && (defined(__PGIC__) || defined(__CUDACC_RTC__) || (defined(_WIN32) && defined(_MSC_VER)))
+
+#if __CUDACC_RTC__
+typedef char *va_list;
+#else /* !__CUDACC_RTC__ */
+#include <cstdarg>
+#endif /* __CUDACC_RTC__ */
+
+
+#undef va_start
+#undef va_end
+#undef va_arg
+
+#ifdef __PGIC__
+
+#undef __builtin_va_end
+
+#define va_start(v,l) __builtin_alt_va_start(v,l)
+#define va_end(v) __builtin_va_end(v)
+#define va_arg(v,l) __builtin_alt_va_arg(v,l)
+
+#if (__cplusplus >= 201103L)
+#undef va_copy
+#define va_copy(d,s) __builtin_va_copy(d,s)
+#endif
+
+#else /* !__PGIC__ */
+
+
+#define va_start(ap, x) (__cu_va_start(&ap, x))
+#define va_end(ap) (__cu_va_end(&ap))
+#define va_arg(ap, t) (*((t *)__cu_va_arg(&ap, (t *)0)))
+
+#if (_MSC_VER >= 1800) || (defined(__CUDACC_RTC__) && (__cplusplus >= 201103L))
+#undef va_copy
+#define va_copy(apd, aps) (__cu_va_copy(&(apd), &(aps)))
+#endif /* (_MSC_VER >= 1800) || (defined(__CUDACC_RTC__) && (__cplusplus >= 201103L)) */
+#endif /* __PGIC__ */
+
+#endif /* defined(__cplusplus) && (defined(__CUDACC_RTC__) || (defined(_WIN32) && defined(_MSC_VER))) */
+
+
+
+#endif /* __CUDACC__ */
+
+#endif /* !__HOST_CONFIG_H__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H__
+#endif
miniCUDA124/include/crt/host_defines.h
ADDED
@@ -0,0 +1,280 @@
+/*
+ * Copyright 1993-2023 NVIDIA Corporation.  All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee.  Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users.  These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/host_defines.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/host_defines.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__
+#endif
+
+#if !defined(__HOST_DEFINES_H__)
+#define __HOST_DEFINES_H__
+
+#if defined(__CUDACC__) && !defined(__CUDACC_RTC__) && !defined(__CUDADEVRT_INTERNAL__) && !defined(_ALLOW_UNSUPPORTED_LIBCPP)
+#include <ctype.h>
+#if ((defined(_MSC_VER ) && (defined(_M_X64) || defined(_M_AMD64))) ||\
+     (defined(__x86_64__) || defined(__amd64__))) && defined(_LIBCPP_VERSION) && !(defined(__HORIZON__) || defined(__ANDROID__) || defined(__QNX__))
+#error "libc++ is not supported on x86 system"
+#endif
+#endif
+
+/* CUDA JIT mode (__CUDACC_RTC__) also uses GNU style attributes */
+#if defined(__GNUC__) || (defined(__PGIC__) && defined(__linux__)) || defined(__CUDA_LIBDEVICE__) || defined(__CUDACC_RTC__)
+
+#if defined(__CUDACC_RTC__)
+#define __volatile__ volatile
+#endif /* __CUDACC_RTC__ */
+
+#define __no_return__ \
+        __attribute__((noreturn))
+
+#if defined(__CUDACC__) || defined(__CUDA_ARCH__) || defined(__CUDA_LIBDEVICE__)
+/* gcc allows users to define attributes with underscores,
+   e.g., __attribute__((__noinline__)).
+   Consider a non-CUDA source file (e.g. .cpp) that has the
+   above attribute specification, and includes this header file. In that case,
+   defining __noinline__ as below would cause a gcc compilation error.
+   Hence, only define __noinline__ when the code is being processed
+   by a CUDA compiler component.
+*/
+#define __noinline__ \
+        __attribute__((noinline))
+#endif /* __CUDACC__ || __CUDA_ARCH__ || __CUDA_LIBDEVICE__ */
+
+#undef __forceinline__
+#define __forceinline__ \
+        __inline__ __attribute__((always_inline))
+#define __inline_hint__ \
+        __attribute__((nv_inline_hint))
+#define __align__(n) \
+        __attribute__((aligned(n)))
+#define __maxnreg__(a) \
+        __attribute__((maxnreg(a)))
+#define __thread__ \
+        __thread
+#define __import__
+#define __export__
+#define __cdecl
+#define __annotate__(a) \
+        __attribute__((a))
+#define __location__(a) \
+        __annotate__(a)
+#define CUDARTAPI
+#define CUDARTAPI_CDECL
+
+#elif defined(_MSC_VER)
+
+#if _MSC_VER >= 1400
+
+#define __restrict__ \
+        __restrict
+
+#else /* _MSC_VER >= 1400 */
+
+#define __restrict__
+
+#endif /* _MSC_VER >= 1400 */
+
+#define __inline__ \
+        __inline
+#define __no_return__ \
+        __declspec(noreturn)
+#define __noinline__ \
+        __declspec(noinline)
+#define __forceinline__ \
+        __forceinline
+#define __inline_hint__ \
+        __declspec(nv_inline_hint)
+#define __align__(n) \
+        __declspec(align(n))
+#define __maxnreg__(n) \
+        __declspec(maxnreg(n))
+#define __thread__ \
+        __declspec(thread)
+#define __import__ \
+        __declspec(dllimport)
+#define __export__ \
+        __declspec(dllexport)
+#define __annotate__(a) \
+        __declspec(a)
+#define __location__(a) \
+        __annotate__(__##a##__)
+#define CUDARTAPI \
+        __stdcall
+#define CUDARTAPI_CDECL \
+        __cdecl
+
+#else /* __GNUC__ || __CUDA_LIBDEVICE__ || __CUDACC_RTC__ */
+
+#define __inline__
+
+#if !defined(__align__)
+
+#error --- !!! UNKNOWN COMPILER: please provide a CUDA compatible definition for '__align__' !!! ---
+
+#endif /* !__align__ */
+
+#if !defined(CUDARTAPI)
+
+#error --- !!! UNKNOWN COMPILER: please provide a CUDA compatible definition for 'CUDARTAPI' !!! ---
+
+#endif /* !CUDARTAPI */
+
+#endif /* __GNUC__ || __CUDA_LIBDEVICE__ || __CUDACC_RTC__ */
+
+#if (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !defined(__clang__)))) || \
+    (defined(_MSC_VER) && _MSC_VER < 1900) || \
+    (!defined(__GNUC__) && !defined(_MSC_VER))
+
+#define __specialization_static \
+        static
+
+#else /* (__GNUC__ && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !__clang__))) ||
+         (_MSC_VER && _MSC_VER < 1900) ||
+         (!__GNUC__ && !_MSC_VER) */
+
+#define __specialization_static
| 187 |
+
|
| 188 |
+
#endif /* (__GNUC__ && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3 && !__clang__))) ||
|
| 189 |
+
(_MSC_VER && _MSC_VER < 1900) ||
|
| 190 |
+
(!__GNUC__ && !_MSC_VER) */
|
| 191 |
+
|
| 192 |
+
#if !defined(__CUDACC__) && !defined(__CUDA_LIBDEVICE__)
|
| 193 |
+
|
| 194 |
+
#undef __annotate__
|
| 195 |
+
#define __annotate__(a)
|
| 196 |
+
|
| 197 |
+
#else /* !__CUDACC__ && !__CUDA_LIBDEVICE__ */
|
| 198 |
+
|
| 199 |
+
#define __launch_bounds__(...) \
|
| 200 |
+
__annotate__(launch_bounds(__VA_ARGS__))
|
| 201 |
+
|
| 202 |
+
#endif /* !__CUDACC__ && !__CUDA_LIBDEVICE__ */
|
| 203 |
+
|
| 204 |
+
#if defined(__CUDACC__) || defined(__CUDA_LIBDEVICE__) || \
|
| 205 |
+
defined(__GNUC__) || defined(_WIN64)
|
| 206 |
+
|
| 207 |
+
#define __builtin_align__(a) \
|
| 208 |
+
__align__(a)
|
| 209 |
+
|
| 210 |
+
#else /* __CUDACC__ || __CUDA_LIBDEVICE__ || __GNUC__ || _WIN64 */
|
| 211 |
+
|
| 212 |
+
#define __builtin_align__(a)
|
| 213 |
+
|
| 214 |
+
#endif /* __CUDACC__ || __CUDA_LIBDEVICE__ || __GNUC__ || _WIN64 */
|
| 215 |
+
|
| 216 |
+
#if defined(__CUDACC__) || !defined(__grid_constant__)
|
| 217 |
+
#define __grid_constant__ \
|
| 218 |
+
__location__(grid_constant)
|
| 219 |
+
#endif /* defined(__CUDACC__) || !defined(__grid_constant__) */
|
| 220 |
+
|
| 221 |
+
#if defined(__CUDACC__) || !defined(__host__)
|
| 222 |
+
#define __host__ \
|
| 223 |
+
__location__(host)
|
| 224 |
+
#endif /* defined(__CUDACC__) || !defined(__host__) */
|
| 225 |
+
#if defined(__CUDACC__) || !defined(__device__)
|
| 226 |
+
#define __device__ \
|
| 227 |
+
__location__(device)
|
| 228 |
+
#endif /* defined(__CUDACC__) || !defined(__device__) */
|
| 229 |
+
#if defined(__CUDACC__) || !defined(__global__)
|
| 230 |
+
#define __global__ \
|
| 231 |
+
__location__(global)
|
| 232 |
+
#endif /* defined(__CUDACC__) || !defined(__global__) */
|
| 233 |
+
#if defined(__CUDACC__) || !defined(__shared__)
|
| 234 |
+
#define __shared__ \
|
| 235 |
+
__location__(shared)
|
| 236 |
+
#endif /* defined(__CUDACC__) || !defined(__shared__) */
|
| 237 |
+
#if defined(__CUDACC__) || !defined(__constant__)
|
| 238 |
+
#define __constant__ \
|
| 239 |
+
__location__(constant)
|
| 240 |
+
#endif /* defined(__CUDACC__) || !defined(__constant__) */
|
| 241 |
+
#if defined(__CUDACC__) || !defined(__managed__)
|
| 242 |
+
#define __managed__ \
|
| 243 |
+
__location__(managed)
|
| 244 |
+
#endif /* defined(__CUDACC__) || !defined(__managed__) */
|
| 245 |
+
|
| 246 |
+
#if !defined(__CUDACC__)
|
| 247 |
+
#define __device_builtin__
|
| 248 |
+
#define __device_builtin_texture_type__
|
| 249 |
+
#define __device_builtin_surface_type__
|
| 250 |
+
#define __cudart_builtin__
|
| 251 |
+
#else /* defined(__CUDACC__) */
|
| 252 |
+
#define __device_builtin__ \
|
| 253 |
+
__location__(device_builtin)
|
| 254 |
+
#define __device_builtin_texture_type__ \
|
| 255 |
+
__location__(device_builtin_texture_type)
|
| 256 |
+
#define __device_builtin_surface_type__ \
|
| 257 |
+
__location__(device_builtin_surface_type)
|
| 258 |
+
#define __cudart_builtin__ \
|
| 259 |
+
__location__(cudart_builtin)
|
| 260 |
+
#endif /* !defined(__CUDACC__) */
|
| 261 |
+
|
| 262 |
+
#if defined(__CUDACC__) || !defined(__cluster_dims__)
|
| 263 |
+
#if defined(_MSC_VER)
|
| 264 |
+
#define __cluster_dims__(...) \
|
| 265 |
+
__declspec(__cluster_dims__(__VA_ARGS__))
|
| 266 |
+
|
| 267 |
+
#else /* !defined(_MSC_VER) */
|
| 268 |
+
#define __cluster_dims__(...) \
|
| 269 |
+
__attribute__((cluster_dims(__VA_ARGS__)))
|
| 270 |
+
#endif /* defined(_MSC_VER) */
|
| 271 |
+
#endif /* defined(__CUDACC__) || !defined(__cluster_dims__) */
|
| 272 |
+
|
| 273 |
+
#define __CUDA_ARCH_HAS_FEATURE__(_FEAT) __CUDA_ARCH_FEAT_##_FEAT
|
| 274 |
+
|
| 275 |
+
#endif /* !__HOST_DEFINES_H__ */
|
| 276 |
+
|
| 277 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__)
|
| 278 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 279 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_DEFINES_H__
|
| 280 |
+
#endif
|
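For orientation (an illustrative aside, not part of the uploaded headers): everything in the host_defines.h diff above boils down to mapping CUDA's source-level qualifiers onto whichever host compiler is active, GNU-style __attribute__((...)), MSVC __declspec(...), or nothing at all, through the __annotate__/__location__ indirection. A minimal sketch of source that relies on those expansions; the kernel scale, the struct, and the sizes are invented for illustration:

// scale.cu -- illustrative sketch only; names are hypothetical
#include <cstdio>
#include <cuda_runtime.h>                        // the supported way to pull in crt/host_defines.h

struct __align__(16) Float4Box { float v[4]; };  // __align__(16) -> __attribute__((aligned(16))) or __declspec(align(16))

__global__ void __launch_bounds__(256)           // __launch_bounds__ expands to __annotate__(launch_bounds(256))
scale(Float4Box *data, float s)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    for (int k = 0; k < 4; ++k)
        data[i].v[k] *= s;                       // __global__ marked scale as a kernel entry point
}

int main()
{
    Float4Box *d = nullptr;
    cudaMalloc(&d, 256 * sizeof(Float4Box));
    cudaMemset(d, 0, 256 * sizeof(Float4Box));
    scale<<<1, 256>>>(d, 2.0f);                  // lowered by nvcc through the host_runtime.h machinery below
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d);
    return 0;
}

Under a plain host compiler the same qualifiers expand to nothing (the !__CUDACC__ branch above empties __annotate__), which is how one set of declarations can serve both host and device passes.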
miniCUDA124/include/crt/host_runtime.h
ADDED
@@ -0,0 +1,306 @@
+/*
+ * NVIDIA_COPYRIGHT_BEGIN
+ *
+ * Copyright (c) 2008-2023, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * NVIDIA CORPORATION and its licensors retain all intellectual property
+ * and proprietary rights in and to this software, related documentation
+ * and any modifications thereto.  Any use, reproduction, disclosure or
+ * distribution of this software and related documentation without an express
+ * license agreement from NVIDIA CORPORATION is strictly prohibited.
+ *
+ * NVIDIA_COPYRIGHT_END
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/device_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+#else
+#warning "crt/device_functions.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__
+#endif
+
+#if !defined(__CUDA_INTERNAL_COMPILATION__)
+
+#define __CUDA_INTERNAL_COMPILATION__
+#define __text__
+#define __surf__
+#define __name__shadow_var(c, cpp) \
+        #c
+#define __name__text_var(c, cpp) \
+        #cpp
+#define __host__shadow_var(c, cpp) \
+        cpp
+#define __text_var(c, cpp) \
+        cpp
+#define __device_fun(fun) \
+        #fun
+#define __device_var(var) \
+        #var
+#define __device__text_var(c, cpp) \
+        #c
+#define __device__shadow_var(c, cpp) \
+        #c
+
+#if defined(_WIN32) && !defined(_WIN64)
+
+#define __pad__(f) \
+        f
+
+#else /* _WIN32 && !_WIN64 */
+
+#define __pad__(f)
+
+#endif /* _WIN32 && !_WIN64 */
+
+#include "builtin_types.h"
+#include "storage_class.h"
+
+#else /* !__CUDA_INTERNAL_COMPILATION__ */
+
+template <typename T>
+static inline T *__cudaAddressOf(T &val)
+{
+    return (T *)((void *)(&(const_cast<char &>(reinterpret_cast<const volatile char &>(val)))));
+}
+
+#define __cudaRegisterBinary(X) \
+        __cudaFatCubinHandle = __cudaRegisterFatBinary((void*)&__fatDeviceText); \
+        { void (*callback_fp)(void **) = (void (*)(void **))(X); (*callback_fp)(__cudaFatCubinHandle); __cudaRegisterFatBinaryEnd(__cudaFatCubinHandle); }\
+        atexit(__cudaUnregisterBinaryUtil)
+
+#define __cudaRegisterVariable(handle, var, ext, size, constant, global) \
+        __cudaRegisterVar(handle, (char*)&__host##var, (char*)__device##var, __name##var, ext, size, constant, global)
+#define __cudaRegisterManagedVariable(handle, var, ext, size, constant, global) \
+        __cudaRegisterManagedVar(handle, (void **)&__host##var, (char*)__device##var, __name##var, ext, size, constant, global)
+
+#define __cudaRegisterGlobalTexture(handle, tex, dim, norm, ext) \
+        __cudaRegisterTexture(handle, (const struct textureReference*)&tex, (const void**)(void*)__device##tex, __name##tex, dim, norm, ext)
+#define __cudaRegisterGlobalSurface(handle, surf, dim, ext) \
+        __cudaRegisterSurface(handle, (const struct surfaceReference*)&surf, (const void**)(void*)__device##surf, __name##surf, dim, ext)
+#define __cudaRegisterEntry(handle, funptr, fun, thread_limit) \
+        __cudaRegisterFunction(handle, (const char*)funptr, (char*)__device_fun(fun), #fun, -1, (uint3*)0, (uint3*)0, (dim3*)0, (dim3*)0, (int*)0)
+
+extern "C" cudaError_t CUDARTAPI __cudaPopCallConfiguration(
+        dim3 *gridDim,
+        dim3 *blockDim,
+        size_t *sharedMem,
+        void *stream
+);
+
+#define __cudaLaunchPrologue(size) \
+        void * __args_arr[size]; \
+        int __args_idx = 0
+
+#define __cudaSetupArg(arg, offset) \
+        __args_arr[__args_idx] = (void *)__cudaAddressOf(arg); ++__args_idx
+
+#define __cudaSetupArgSimple(arg, offset) \
+        __args_arr[__args_idx] = (void *)(char *)&arg; ++__args_idx
+
+#if defined(__GNUC__)
+#define __NV_ATTR_UNUSED_FOR_LAUNCH __attribute__((unused))
+#else /* !__GNUC__ */
+#define __NV_ATTR_UNUSED_FOR_LAUNCH
+#endif /* __GNUC__ */
+
+#ifdef __NV_LEGACY_LAUNCH
+/* the use of __args_idx in the expression below avoids host compiler warning about it being an
+   unused variable when the launch has no arguments */
+#define __cudaLaunch(fun) \
+{ volatile static char *__f __NV_ATTR_UNUSED_FOR_LAUNCH; __f = fun; \
+  dim3 __gridDim, __blockDim;\
+  size_t __sharedMem; \
+  cudaStream_t __stream; \
+  if (__cudaPopCallConfiguration(&__gridDim, &__blockDim, &__sharedMem, &__stream) != cudaSuccess) \
+    return; \
+  if (__args_idx == 0) {\
+    (void)cudaLaunchKernel(fun, __gridDim, __blockDim, &__args_arr[__args_idx], __sharedMem, __stream);\
+  } else { \
+    (void)cudaLaunchKernel(fun, __gridDim, __blockDim, &__args_arr[0], __sharedMem, __stream);\
+  }\
+}
+#else /* !__NV_LEGACY_LAUNCH */
+#define __cudaLaunch(fun) \
+{ volatile static char *__f __NV_ATTR_UNUSED_FOR_LAUNCH; __f = fun; \
+  static cudaKernel_t __handle = 0; \
+  volatile static bool __tmp __NV_ATTR_UNUSED_FOR_LAUNCH = (__cudaGetKernel(&__handle, (const void *)fun) == cudaSuccess); \
+  dim3 __gridDim, __blockDim;\
+  size_t __sharedMem; \
+  cudaStream_t __stream; \
+  if (__cudaPopCallConfiguration(&__gridDim, &__blockDim, &__sharedMem, &__stream) != cudaSuccess) \
+    return; \
+  if (__args_idx == 0) {\
+    (void)__cudaLaunchKernel_helper(__handle, __gridDim, __blockDim, &__args_arr[__args_idx], __sharedMem, __stream);\
+  } else { \
+    (void)__cudaLaunchKernel_helper(__handle, __gridDim, __blockDim, &__args_arr[0], __sharedMem, __stream);\
+  }\
+}
+#endif /* __NV_LEGACY_LAUNCH */
+
+#if defined(__GNUC__)
+#define __nv_dummy_param_ref(param) \
+        { volatile static void **__ref __attribute__((unused)); __ref = (volatile void **)param; }
+#else /* __GNUC__ */
+#define __nv_dummy_param_ref(param) \
+        { volatile static void **__ref; __ref = (volatile void **)param; }
+#endif /* __GNUC__ */
+
+static void ____nv_dummy_param_ref(void *param) __nv_dummy_param_ref(param)
+
+#define __REGISTERFUNCNAME_CORE(X) __cudaRegisterLinkedBinary##X
+#define __REGISTERFUNCNAME(X) __REGISTERFUNCNAME_CORE(X)
+
+extern "C" {
+void __REGISTERFUNCNAME( __NV_MODULE_ID ) ( void (*)(void **), void *, void *, void (*)(void *));
+}
+
+#define __TO_STRING_CORE(X) #X
+#define __TO_STRING(X) __TO_STRING_CORE(X)
+
+extern "C" {
+#if defined(_WIN32)
+#pragma data_seg("__nv_module_id")
+static const __declspec(allocate("__nv_module_id")) unsigned char __module_id_str[] = __TO_STRING(__NV_MODULE_ID);
+#pragma data_seg()
+#elif defined(__APPLE__)
+static const unsigned char __module_id_str[] __attribute__((section ("__NV_CUDA,__nv_module_id"))) = __TO_STRING(__NV_MODULE_ID);
+#else
+static const unsigned char __module_id_str[] __attribute__((section ("__nv_module_id"))) = __TO_STRING(__NV_MODULE_ID);
+#endif
+
+#undef __FATIDNAME_CORE
+#undef __FATIDNAME
+#define __FATIDNAME_CORE(X) __fatbinwrap##X
+#define __FATIDNAME(X) __FATIDNAME_CORE(X)
+
+#define ____cudaRegisterLinkedBinary(X) \
+{ __REGISTERFUNCNAME(__NV_MODULE_ID) (( void (*)(void **))(X), (void *)&__FATIDNAME(__NV_MODULE_ID), (void *)&__module_id_str, (void (*)(void *))&____nv_dummy_param_ref); }
+
+}
+
+extern "C" {
+extern void** CUDARTAPI __cudaRegisterFatBinary(
+        void *fatCubin
+);
+
+extern void CUDARTAPI __cudaRegisterFatBinaryEnd(
+        void **fatCubinHandle
+);
+
+extern void CUDARTAPI __cudaUnregisterFatBinary(
+        void **fatCubinHandle
+);
+
+extern void CUDARTAPI __cudaRegisterVar(
+        void **fatCubinHandle,
+        char *hostVar,
+        char *deviceAddress,
+        const char *deviceName,
+        int ext,
+        size_t size,
+        int constant,
+        int global
+);
+
+extern void CUDARTAPI __cudaRegisterManagedVar(
+        void **fatCubinHandle,
+        void **hostVarPtrAddress,
+        char *deviceAddress,
+        const char *deviceName,
+        int ext,
+        size_t size,
+        int constant,
+        int global
+);
+
+extern char CUDARTAPI __cudaInitModule(
+        void **fatCubinHandle
+);
+
+extern void CUDARTAPI __cudaRegisterTexture(
+        void **fatCubinHandle,
+        const struct textureReference *hostVar,
+        const void **deviceAddress,
+        const char *deviceName,
+        int dim,
+        int norm,
+        int ext
+);
+
+extern void CUDARTAPI __cudaRegisterSurface(
+        void **fatCubinHandle,
+        const struct surfaceReference *hostVar,
+        const void **deviceAddress,
+        const char *deviceName,
+        int dim,
+        int ext
+);
+
+extern void CUDARTAPI __cudaRegisterFunction(
+        void **fatCubinHandle,
+        const char *hostFun,
+        char *deviceFun,
+        const char *deviceName,
+        int thread_limit,
+        uint3 *tid,
+        uint3 *bid,
+        dim3 *bDim,
+        dim3 *gDim,
+        int *wSize
+);
+
+#if defined(__APPLE__)
+extern "C" int atexit(void (*)(void));
+
+#elif defined(__GNUC__) && !defined(__ANDROID__) && !defined(__HORIZON__)
+extern int atexit(void(*)(void)) throw();
+
+#elif defined(__HORIZON__)
+
+// __TEMP_WAR__ 200132570 HOS : Disable atexit call until it works
+#define atexit(p)
+
+#else /* __GNUC__ && !__ANDROID__ */
+extern int __cdecl atexit(void(__cdecl *)(void));
+#endif
+
+}
+
+static void **__cudaFatCubinHandle;
+
+static void __cdecl __cudaUnregisterBinaryUtil(void)
+{
+    ____nv_dummy_param_ref((void *)&__cudaFatCubinHandle);
+    __cudaUnregisterFatBinary(__cudaFatCubinHandle);
+}
+
+static char __nv_init_managed_rt_with_module(void **handle)
+{
+    return __cudaInitModule(handle);
+}
+
+#include "common_functions.h"
+
+#pragma pack()
+
+#if defined(_WIN32)
+
+#pragma warning(disable: 4099)
+
+#if !defined(_WIN64)
+
+#pragma warning(disable: 4408)
+
+#endif /* !_WIN64 */
+
+#endif /* _WIN32 */
+
+#endif /* !__CUDA_INTERNAL_COMPILATION__ */
+
+#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__)
+#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_RUNTIME_H__
+#endif
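For orientation (an illustrative aside, not part of the uploaded headers): the __cudaLaunchPrologue / __cudaSetupArg* / __cudaLaunch macros above are expanded by the CUDA front end inside the generated host stub for each __global__ function; they build an array with one void* per kernel argument and then launch with the configuration popped by __cudaPopCallConfiguration. In the __NV_LEGACY_LAUNCH path this bottoms out in plain cudaLaunchKernel, which can also be written by hand. A self-contained sketch of that equivalent; the kernel axpy and all sizes are invented for illustration:

// launch_by_hand.cu -- illustrative sketch only
#include <cstdio>
#include <cuda_runtime.h>

__global__ void axpy(float *y, float a)
{
    y[threadIdx.x] *= a;
}

int main()
{
    float *d_y = nullptr;
    cudaMalloc(&d_y, 32 * sizeof(float));
    cudaMemset(d_y, 0, 32 * sizeof(float));

    float a = 2.0f;
    // Same layout __cudaLaunchPrologue/__cudaSetupArgSimple build up:
    // one void* per kernel parameter, each pointing at the argument value.
    void *args[2] = { &d_y, &a };

    // __cudaLaunch feeds the popped grid/block/shared-mem/stream configuration
    // into cudaLaunchKernel; here the configuration is supplied explicitly.
    cudaLaunchKernel((const void *)axpy, dim3(1), dim3(32), args, 0, (cudaStream_t)0);
    printf("launch status: %s\n", cudaGetErrorString(cudaDeviceSynchronize()));

    cudaFree(d_y);
    return 0;
}

The only contract is that args[i] points at the i-th argument value, which is exactly what __cudaSetupArgSimple records.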
miniCUDA124/include/crt/math_functions.h
ADDED
The diff for this file is too large to render.
See raw diff

miniCUDA124/include/crt/math_functions.hpp
ADDED
The diff for this file is too large to render.
See raw diff
miniCUDA124/include/crt/mma.h
ADDED
@@ -0,0 +1,754 @@
+/*
+ * Copyright 2017-2020 NVIDIA Corporation.  All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee.  Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users.  These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+#if defined(_MSC_VER)
+#pragma message("crt/mma.h is an internal header file and must not be used directly. Please use mma.h instead.")
+#else
+#warning "crt/mma.h is an internal header file and must not be used directly. Please use mma.h instead."
+#endif
+#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__
+#endif
+
+#if !defined(__CUDA_MMA_H__)
+#define __CUDA_MMA_H__
+
+#include <cuda_fp16.h>
+#include <cuda_bf16.h>
+
+#define __CUDA_MMA_DEVICE_DECL__ static __device__ __inline__
+
+#if defined(__cplusplus) && defined(__CUDACC__)
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
+
+
+#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
+#define __DEF_IF_HOST { }
+#else /* !__CUDA_ARCH__ && !_NVHPC_CUDA */
+#define __DEF_IF_HOST ;
+#endif /* __CUDA_ARCH__ || _NVHPC_CUDA */
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720
+#define __CUDA_IMMA__ 1
+#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720 */
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730
+#define __CUDA_SUBBYTE_IMMA__ 1
+#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730 */
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
+#define __CUDA_AMPERE_MMA__ 1
+#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 */
+
+namespace nvcuda {
+namespace wmma {
+
+// utility functions
+#ifdef __CUDA_AMPERE_MMA__
+inline __device__ float __float_to_tf32(float in)
+{
+  float ret;
+  asm("{\n .reg .b32 __$1;"
+      "\n cvt.rna.tf32.f32 __$1, %1;"
+      "\n mov.b32 %0, __$1;\n}\n" : "=f"(ret) : "f"(in) );
+  return ret;
+}
+#endif /* __CUDA_AMPERE_MMA__ */
+
+//
+// tags
+//
+struct row_major;
+struct col_major;
+struct matrix_a;
+struct matrix_b;
+struct accumulator;
+
+#ifdef __CUDA_AMPERE_MMA__
+namespace precision {
+struct tf32;
+}
+#endif /* __CUDA_AMPERE_MMA__ */
+#ifdef __CUDA_SUBBYTE_IMMA__
+namespace experimental {
+namespace precision {
+struct u4; // 4-bit unsigned
+struct s4; // 4-bit signed
+struct b1; // 1-bit
+}
+enum bmmaBitOp { bmmaBitOpXOR = 1
+#ifdef __CUDA_AMPERE_MMA__
+, bmmaBitOpAND = 2
+#endif /* __CUDA_AMPERE_MMA__ */
+};
+enum bmmaAccumulateOp { bmmaAccumulateOpPOPC = 1 };
+}
+#endif /* __CUDA_SUBBYTE_IMMA__ */
+
+//
+// layout
+//
+enum layout_t {
+  mem_row_major, mem_col_major
+};
+
+template <typename T>
+struct helper_traits {
+  typedef T element_type;
+  typedef T storage_element_type;
+  typedef T fill_argument_type;
+};
+
+#ifdef __CUDA_SUBBYTE_IMMA__
+template<> struct helper_traits<experimental::precision::u4> {
+  typedef experimental::precision::u4 element_type;
+  typedef unsigned int storage_element_type;
+  typedef unsigned int fill_argument_type;
+};
+
+template<> struct helper_traits<experimental::precision::s4> {
+  typedef experimental::precision::s4 element_type;
+  typedef int storage_element_type;
+  typedef int fill_argument_type;
+};
+
+template<> struct helper_traits<experimental::precision::b1> {
+  typedef experimental::precision::b1 element_type;
+  typedef unsigned int storage_element_type;
+  typedef unsigned int fill_argument_type;
+};
+#endif /* __CUDA_SUBBYTE_IMMA__ */
+
+#ifdef __CUDA_AMPERE_MMA__
+template<> struct helper_traits<precision::tf32> {
+  typedef precision::tf32 element_type;
+  typedef float storage_element_type;
+  typedef float fill_argument_type;
+};
+#endif /* __CUDA_AMPERE_MMA__ */
+
+//
+// The base fragment type
+//
+/* note: alignment required for compiler implementation */
+template <typename T, int size, int packed_size = size>
+struct __align__(8) __frag_base {
+
+  /* Number of elements in the fragment */
+  enum {num_elements = size};
+
+  /* Number of storage elements in the fragment.
+
+     The elements of the fragment are packed together when the
+     fragment element type is experimental::precision::u4,
+     experimental::precision::s4 or experimental::precision::b1.
+     When elements are packed, num_storage_elements
+     will be smaller than num_elements.
+  */
+  enum {num_storage_elements = packed_size};
+
+  /* element type of the fragment */
+  typedef T element_type;
+
+  /* element type of the storage representation.
+
+     The mapping from element_type to storage_element_type is as follows:
+       experimental::precision::u4 -> unsigned (8 elements in 1 storage element)
+       experimental::precision::s4 -> int (8 elements in 1 storage element)
+       experimental::precision::b1 -> unsigned (32 elements in 1 storage element)
+       precision::tf32 -> float (1 element in 1 storage element)
+       all other types T -> T
+  */
+  typedef typename helper_traits<T>::storage_element_type storage_element_type;
+
+  /* Storage for the (possibly packed) fragment elements. */
+  storage_element_type x[num_storage_elements];
+};
+
+template <typename FragEleType, typename StorageType, typename ArgType>
+static inline __device__ StorageType __get_storage_value(ArgType in) { return in; }
+
+#ifdef __CUDA_SUBBYTE_IMMA__
+template<>
+__device__ inline unsigned
+__get_storage_value<experimental::precision::u4, unsigned, unsigned>(unsigned in)
+{
+  /* For experimental::precision::u4 fragment element type, pack 8 elements into a single
+     32-bit unsigned int storage element */
+  unsigned val = in & 0xf;
+  return (val | (val << 4) | (val << 8) | (val << 12) | (val << 16) |
+          (val << 20) | (val << 24) | (val << 28));
+};
+
+template<>
+__device__ inline int
+__get_storage_value<experimental::precision::s4, int, int>(int in)
+{
+  /* For experimental::precision::s4 fragment element type, pack 8 elements into a single
+     32-bit signed int storage element */
+  int val = in & 0xf;
+  return (val | (val << 4) | (val << 8) | (val << 12) | (val << 16) |
+          (val << 20) | (val << 24) | (val << 28));
+};
+
+template<>
+__device__ inline unsigned
+__get_storage_value<experimental::precision::b1, unsigned, unsigned>(unsigned in)
+{
+  /* For experimental::precision::b1 fragment element type, pack 32 elements into a
+     single 32-bit unsigned int storage element */
+  return (in & 0x1) ? 0xFFFFFFFFU : 0;
+}
+#endif /* __CUDA_SUBBYTE_IMMA__ */
+
+template <typename FragEleType, int size, int packed_size>
+__CUDA_MMA_DEVICE_DECL__ void fill_fragment(__frag_base<FragEleType, size, packed_size>& f,
+  /* The mapping from fragment element type (FragEleType) to fill_argument_type is:
+       experimental::precision::u4 -> unsigned (only lower 4 bits taken)
+       experimental::precision::s4 -> int (only lower 4 bits taken)
+       experimental::precision::b1 -> unsigned (only lowest 1 bit taken)
+       precision::tf32 -> float
+       all other types T -> T
+  */
+  const typename helper_traits<FragEleType>::fill_argument_type & in) {
+
+  /* get the (possibly packed) storage element value. See the specializations above for fragment
+     element types where the storage representation is packed */
+  typedef typename helper_traits<FragEleType>::storage_element_type storage_type;
+  storage_type v = __get_storage_value<FragEleType, storage_type>(in);
+#pragma unroll
+  for (int i=0; i< f.num_storage_elements; i++)
+    f.x[i] = v;
+}
+
+//
+// Fragment template
+//
+template<typename Use, int m, int n, int k, typename T, typename Layout=void> class fragment;
+
+//
+// Fragments for 16x16x16
+//
+template<> class fragment<matrix_a, 16, 16, 16, __half, row_major> : public __frag_base<__half, 16> {};
+template<> class fragment<matrix_a, 16, 16, 16, __half, col_major> : public __frag_base<__half, 16> {};
+template<> class fragment<matrix_b, 16, 16, 16, __half, row_major> : public __frag_base<__half, 16> {};
+template<> class fragment<matrix_b, 16, 16, 16, __half, col_major> : public __frag_base<__half, 16> {};
+template<> class fragment<accumulator, 16, 16, 16, __half> : public __frag_base<__half, 8> {};
+template<> class fragment<accumulator, 16, 16, 16, float> : public __frag_base<float, 8> {};
+
+#ifdef __CUDA_IMMA__
+template<> class fragment<matrix_a, 16, 16, 16, signed char, row_major> : public __frag_base<signed char, 8> {};
+template<> class fragment<matrix_a, 16, 16, 16, signed char, col_major> : public __frag_base<signed char, 8> {};
+template<> class fragment<matrix_a, 16, 16, 16, unsigned char, row_major> : public __frag_base<unsigned char, 8> {};
+template<> class fragment<matrix_a, 16, 16, 16, unsigned char, col_major> : public __frag_base<unsigned char, 8> {};
+template<> class fragment<matrix_b, 16, 16, 16, signed char, row_major> : public __frag_base<signed char, 8> {};
+template<> class fragment<matrix_b, 16, 16, 16, signed char, col_major> : public __frag_base<signed char, 8> {};
+template<> class fragment<matrix_b, 16, 16, 16, unsigned char, row_major> : public __frag_base<unsigned char, 8> {};
+template<> class fragment<matrix_b, 16, 16, 16, unsigned char, col_major> : public __frag_base<unsigned char, 8> {};
+template<> class fragment<accumulator, 16, 16, 16, int> : public __frag_base<int, 8> {};
+#endif /* __CUDA_IMMA__ */
+
+#ifdef __CUDA_AMPERE_MMA__
+template<> class fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 8> {};
+template<> class fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 8> {};
+template<> class fragment<matrix_b, 16, 16, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 8> {};
+template<> class fragment<matrix_b, 16, 16, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 8> {};
+#endif /* __CUDA_AMPERE_MMA__ */
+
+//
+// Fragments for 32x8x16
+//
+template<> class fragment<matrix_a, 32, 8, 16, __half, row_major> : public __frag_base<__half, 16> {};
+template<> class fragment<matrix_a, 32, 8, 16, __half, col_major> : public __frag_base<__half, 16> {};
+template<> class fragment<matrix_b, 32, 8, 16, __half, row_major> : public __frag_base<__half, 16> {};
+template<> class fragment<matrix_b, 32, 8, 16, __half, col_major> : public __frag_base<__half, 16> {};
+template<> class fragment<accumulator, 32, 8, 16, __half> : public __frag_base<__half, 8> {};
+template<> class fragment<accumulator, 32, 8, 16, float> : public __frag_base<float, 8> {};
+
+#ifdef __CUDA_IMMA__
+template<> class fragment<matrix_a, 32, 8, 16, signed char, row_major> : public __frag_base<signed char, 16> {};
+template<> class fragment<matrix_a, 32, 8, 16, signed char, col_major> : public __frag_base<signed char, 16> {};
+template<> class fragment<matrix_a, 32, 8, 16, unsigned char, row_major> : public __frag_base<unsigned char, 16> {};
+template<> class fragment<matrix_a, 32, 8, 16, unsigned char, col_major> : public __frag_base<unsigned char, 16> {};
+template<> class fragment<matrix_b, 32, 8, 16, signed char, row_major> : public __frag_base<signed char, 4> {};
+template<> class fragment<matrix_b, 32, 8, 16, signed char, col_major> : public __frag_base<signed char, 4> {};
+template<> class fragment<matrix_b, 32, 8, 16, unsigned char, row_major> : public __frag_base<unsigned char, 4> {};
+template<> class fragment<matrix_b, 32, 8, 16, unsigned char, col_major> : public __frag_base<unsigned char, 4> {};
+template<> class fragment<accumulator, 32, 8, 16, int> : public __frag_base<int, 8> {};
+#endif /* __CUDA_IMMA__ */
+
+#ifdef __CUDA_AMPERE_MMA__
+template<> class fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 16> {};
+template<> class fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 16> {};
+template<> class fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 4> {};
+template<> class fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 4> {};
+#endif /* __CUDA_AMPERE_MMA__ */
+
+//
+// Fragments for 8x32x16
+//
+template<> class fragment<matrix_a, 8, 32, 16, __half, row_major> : public __frag_base<__half, 16> {};
+template<> class fragment<matrix_a, 8, 32, 16, __half, col_major> : public __frag_base<__half, 16> {};
+template<> class fragment<matrix_b, 8, 32, 16, __half, row_major> : public __frag_base<__half, 16> {};
+template<> class fragment<matrix_b, 8, 32, 16, __half, col_major> : public __frag_base<__half, 16> {};
+template<> class fragment<accumulator, 8, 32, 16, __half> : public __frag_base<__half, 8> {};
+template<> class fragment<accumulator, 8, 32, 16, float> : public __frag_base<float, 8> {};
+
+#ifdef __CUDA_IMMA__
+template<> class fragment<matrix_a, 8, 32, 16, signed char, row_major> : public __frag_base<signed char, 4> {};
+template<> class fragment<matrix_a, 8, 32, 16, signed char, col_major> : public __frag_base<signed char, 4> {};
+template<> class fragment<matrix_a, 8, 32, 16, unsigned char, row_major> : public __frag_base<unsigned char, 4> {};
+template<> class fragment<matrix_a, 8, 32, 16, unsigned char, col_major> : public __frag_base<unsigned char, 4> {};
+template<> class fragment<matrix_b, 8, 32, 16, signed char, row_major> : public __frag_base<signed char, 16> {};
+template<> class fragment<matrix_b, 8, 32, 16, signed char, col_major> : public __frag_base<signed char, 16> {};
+template<> class fragment<matrix_b, 8, 32, 16, unsigned char, row_major> : public __frag_base<unsigned char, 16> {};
+template<> class fragment<matrix_b, 8, 32, 16, unsigned char, col_major> : public __frag_base<unsigned char, 16> {};
+template<> class fragment<accumulator, 8, 32, 16, int> : public __frag_base<int, 8> {};
+#endif /* __CUDA_IMMA__ */
+
+#ifdef __CUDA_AMPERE_MMA__
+template<> class fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 4> {};
+template<> class fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 4> {};
+template<> class fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major> : public __frag_base<__nv_bfloat16, 16> {};
+template<> class fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major> : public __frag_base<__nv_bfloat16, 16> {};
+#endif /* __CUDA_AMPERE_MMA__ */
+
+#ifdef __CUDA_SUBBYTE_IMMA__
+//
+// Fragments for 8x8x32
+//
+template<> class fragment<matrix_a, 8, 8, 32, experimental::precision::u4, row_major> : public __frag_base<experimental::precision::u4, 8, 1> {};
+template<> class fragment<matrix_a, 8, 8, 32, experimental::precision::s4, row_major> : public __frag_base<experimental::precision::s4, 8, 1> {};
+template<> class fragment<matrix_b, 8, 8, 32, experimental::precision::u4, col_major> : public __frag_base<experimental::precision::u4, 8, 1> {};
+template<> class fragment<matrix_b, 8, 8, 32, experimental::precision::s4, col_major> : public __frag_base<experimental::precision::s4, 8, 1> {};
+template<> class fragment<accumulator, 8, 8, 32, int> : public __frag_base<int, 2> {};
+
+//
+// Fragments for 8x8x128
+//
+template<> class fragment<matrix_a, 8, 8, 128, experimental::precision::b1, row_major> : public __frag_base<experimental::precision::b1, 32, 1> {};
+template<> class fragment<matrix_b, 8, 8, 128, experimental::precision::b1, col_major> : public __frag_base<experimental::precision::b1, 32, 1> {};
+template<> class fragment<accumulator, 8, 8, 128, int> : public __frag_base<int, 2> {};
+#endif /* __CUDA_SUBBYTE_IMMA__ */
+
+#ifdef __CUDA_AMPERE_MMA__
+//
+// Fragments for 16x16x8
+//
+template<> class fragment<matrix_a, 16, 16, 8, precision::tf32, row_major> : public __frag_base<precision::tf32, 4> {};
+template<> class fragment<matrix_a, 16, 16, 8, precision::tf32, col_major> : public __frag_base<precision::tf32, 4> {};
+template<> class fragment<matrix_b, 16, 16, 8, precision::tf32, row_major> : public __frag_base<precision::tf32, 4> {};
+template<> class fragment<matrix_b, 16, 16, 8, precision::tf32, col_major> : public __frag_base<precision::tf32, 4> {};
+template<> class fragment<accumulator, 16, 16, 8, float> : public __frag_base<float, 8> {};
+
+//
+// Fragments for 8x8x4
+//
+template<> class fragment<matrix_a, 8, 8, 4, double, row_major> : public __frag_base<double, 1> {};
+template<> class fragment<matrix_a, 8, 8, 4, double, col_major> : public __frag_base<double, 1> {};
+template<> class fragment<matrix_b, 8, 8, 4, double, row_major> : public __frag_base<double, 1> {};
+template<> class fragment<matrix_b, 8, 8, 4, double, col_major> : public __frag_base<double, 1> {};
+template<> class fragment<accumulator, 8, 8, 4, double> : public __frag_base<double, 2> {};
+#endif /* __CUDA_AMPERE_MMA__ */
+
+
+//
+// Load functions for frags of shape m16n16k16
+//
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 16, float>& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
+
+#ifdef __CUDA_IMMA__
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 16, int>& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
+#endif /* __CUDA_IMMA__ */
+
+#ifdef __CUDA_AMPERE_MMA__
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+#endif /* __CUDA_AMPERE_MMA__ */
+
+//
+// Load functions for frags of shape m32n8k16
+//
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, float>& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
+
+#ifdef __CUDA_IMMA__
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, int>& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
+#endif /* __CUDA_IMMA__ */
+
+#ifdef __CUDA_AMPERE_MMA__
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+#endif /* __CUDA_AMPERE_MMA__ */
+
+//
+// Load functions for frags of shape m8n32k16
+//
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __half, row_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __half, col_major>& a, const __half* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, float>& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
+
+#ifdef __CUDA_IMMA__
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, int>& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
+#endif /* __CUDA_IMMA__ */
+
+#ifdef __CUDA_AMPERE_MMA__
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) __DEF_IF_HOST
+#endif /* __CUDA_AMPERE_MMA__ */
+
+#ifdef __CUDA_SUBBYTE_IMMA__
+//
+// Load functions for frags of shape m8n8k32
+//
+__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 32, experimental::precision::s4, row_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
|
| 495 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 32, experimental::precision::u4, row_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
|
| 496 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 32, experimental::precision::s4, col_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
|
| 497 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 32, experimental::precision::u4, col_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
|
| 498 |
+
|
| 499 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 32, int>& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 500 |
+
|
| 501 |
+
//
|
| 502 |
+
// Load functions for frags of shape m8n8k128
|
| 503 |
+
//
|
| 504 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 128, experimental::precision::b1, row_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
|
| 505 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 128, experimental::precision::b1, col_major>& a, const void* p, unsigned ldm) __DEF_IF_HOST
|
| 506 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 128, int>& a, const int* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 507 |
+
|
| 508 |
+
#endif /* __CUDA_SUBBYTE_IMMA__ */
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 512 |
+
//
|
| 513 |
+
// Load functions for frags of shape m16n16k8
|
| 514 |
+
//
|
| 515 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const float* p, unsigned ldm) __DEF_IF_HOST
|
| 516 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const float* p, unsigned ldm) __DEF_IF_HOST
|
| 517 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& a, const float* p, unsigned ldm) __DEF_IF_HOST
|
| 518 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& a, const float* p, unsigned ldm) __DEF_IF_HOST
|
| 519 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 8, float>& a, const float* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 520 |
+
|
| 521 |
+
//
|
| 522 |
+
// Load functions for frags of shape m8n8k4
|
| 523 |
+
//
|
| 524 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 4, double, row_major>& a, const double* p, unsigned ldm) __DEF_IF_HOST
|
| 525 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 4, double, col_major>& a, const double* p, unsigned ldm) __DEF_IF_HOST
|
| 526 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 4, double, row_major>& a, const double* p, unsigned ldm) __DEF_IF_HOST
|
| 527 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 4, double, col_major>& a, const double* p, unsigned ldm) __DEF_IF_HOST
|
| 528 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 4, double>& a, const double* p, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 529 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 530 |
+
|
| 531 |
+
//
|
| 532 |
+
// Store functions for frags of shape m16n16k16
|
| 533 |
+
//
|
| 534 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator, 16, 16, 16, __half>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 535 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 16, 16, 16, float>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 536 |
+
#ifdef __CUDA_IMMA__
|
| 537 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 16, 16, 16, int>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 538 |
+
#endif /* __CUDA_IMMA__ */
|
| 539 |
+
|
| 540 |
+
//
|
| 541 |
+
// Store functions for frags of shape m32n8k16
|
| 542 |
+
//
|
| 543 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator, 32, 8, 16, __half>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 544 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 32, 8, 16, float>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 545 |
+
#ifdef __CUDA_IMMA__
|
| 546 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 32, 8, 16, int>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 547 |
+
#endif /* __CUDA_IMMA__ */
|
| 548 |
+
|
| 549 |
+
//
|
| 550 |
+
// Store functions for frags of shape m8n32k16
|
| 551 |
+
//
|
| 552 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator, 8, 32, 16, __half>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 553 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 8, 32, 16, float>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 554 |
+
#ifdef __CUDA_IMMA__
|
| 555 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 32, 16, int>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 556 |
+
#endif /* __CUDA_IMMA__ */
|
| 557 |
+
|
| 558 |
+
#ifdef __CUDA_SUBBYTE_IMMA__
|
| 559 |
+
//
|
| 560 |
+
// Store functions for frags of shape m8n8k32
|
| 561 |
+
//
|
| 562 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 8, 32, int>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 563 |
+
|
| 564 |
+
//
|
| 565 |
+
// Store functions for frags of shape m8n8k128
|
| 566 |
+
//
|
| 567 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 8, 128, int>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 568 |
+
|
| 569 |
+
#endif /* __CUDA_SUBBYTE_IMMA__ */
|
| 570 |
+
|
| 571 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 572 |
+
//
|
| 573 |
+
// Store functions for frags of shape m16n16k8
|
| 574 |
+
//
|
| 575 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 16, 16, 8, float>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 576 |
+
|
| 577 |
+
//
|
| 578 |
+
// Store functions for frags of shape m8n8k4
|
| 579 |
+
//
|
| 580 |
+
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(double *p, const fragment<accumulator, 8, 8, 4, double>& a, unsigned ldm, layout_t layout) __DEF_IF_HOST
|
| 581 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 582 |
+
|
| 583 |
+
//
|
| 584 |
+
// MMA functions for shape m16n16k16
|
| 585 |
+
//
|
| 586 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
|
| 587 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
|
| 588 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
|
| 589 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
|
| 590 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
|
| 591 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
|
| 592 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
|
| 593 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) __DEF_IF_HOST
|
| 594 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 595 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 596 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 597 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 598 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 599 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 600 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 601 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 602 |
+
|
| 603 |
+
#ifdef __CUDA_IMMA__
|
| 604 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const fragment<matrix_b,16, 16, 16, signed char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 605 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const fragment<matrix_b,16, 16, 16, signed char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 606 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const fragment<matrix_b,16, 16, 16, signed char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 607 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const fragment<matrix_b,16, 16, 16, signed char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 608 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 609 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 610 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 611 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 612 |
+
#endif /* __CUDA_IMMA__ */
|
| 613 |
+
|
| 614 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 615 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 616 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 617 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 618 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) __DEF_IF_HOST
|
| 619 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 620 |
+
|
| 621 |
+
//
|
| 622 |
+
// MMA functions for shape m32n8k16
|
| 623 |
+
//
|
| 624 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
|
| 625 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
|
| 626 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
|
| 627 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
|
| 628 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
|
| 629 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
|
| 630 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
|
| 631 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, __half>& c) __DEF_IF_HOST
|
| 632 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 633 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 634 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 635 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 636 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 637 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 638 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 639 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b, 32, 8, 16, __half, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 640 |
+
|
| 641 |
+
#ifdef __CUDA_IMMA__
|
| 642 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 643 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 644 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 645 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 646 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 647 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 648 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 649 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 650 |
+
#endif /* __CUDA_IMMA__ */
|
| 651 |
+
|
| 652 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 653 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 654 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 655 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 656 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) __DEF_IF_HOST
|
| 657 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 658 |
+
|
| 659 |
+
//
|
| 660 |
+
// MMA functions for shape m8n32k16
|
| 661 |
+
//
|
| 662 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
|
| 663 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
|
| 664 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
|
| 665 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
|
| 666 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
|
| 667 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
|
| 668 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
|
| 669 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, __half>& c) __DEF_IF_HOST
|
| 670 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 671 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 672 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 673 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 674 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 675 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 676 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 677 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b, 8, 32, 16, __half, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 678 |
+
|
| 679 |
+
#ifdef __CUDA_IMMA__
|
| 680 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 681 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 682 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 683 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 684 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 685 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 686 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 687 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf=false) __DEF_IF_HOST
|
| 688 |
+
#endif /* __CUDA_IMMA__ */
|
| 689 |
+
|
| 690 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 691 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 692 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 693 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 694 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) __DEF_IF_HOST
|
| 695 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 696 |
+
|
| 697 |
+
#ifdef __CUDA_SUBBYTE_IMMA__
|
| 698 |
+
//
|
| 699 |
+
// MMA functions for shape m8n8k32
|
| 700 |
+
//
|
| 701 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 32, int>& d, const fragment<matrix_a, 8, 8, 32, experimental::precision::s4, row_major>& a, const fragment<matrix_b, 8, 8, 32, experimental::precision::s4, col_major>& b, const fragment<accumulator, 8, 8, 32, int>& c, bool satf=false) __DEF_IF_HOST
|
| 702 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 32, int>& d, const fragment<matrix_a, 8, 8, 32, experimental::precision::u4, row_major>& a, const fragment<matrix_b, 8, 8, 32, experimental::precision::u4, col_major>& b, const fragment<accumulator, 8, 8, 32, int>& c, bool satf=false) __DEF_IF_HOST
|
| 703 |
+
|
| 704 |
+
|
| 705 |
+
//
|
| 706 |
+
// MMA functions for shape m8n8k128
|
| 707 |
+
//
|
| 708 |
+
__CUDA_MMA_DEVICE_DECL__ void bmma_sync(fragment<accumulator, 8, 8, 128, int>& d, const fragment<matrix_a, 8, 8, 128, experimental::precision::b1, row_major>& a, const fragment<matrix_b, 8, 8, 128, experimental::precision::b1, col_major>& b, const fragment<accumulator, 8, 8, 128, int>& c,
|
| 709 |
+
experimental::bmmaBitOp = experimental::bmmaBitOpXOR,
|
| 710 |
+
experimental::bmmaAccumulateOp = experimental::bmmaAccumulateOpPOPC) __DEF_IF_HOST
|
| 711 |
+
|
| 712 |
+
#endif /* __CUDA_SUBBYTE_IMMA__ */
|
| 713 |
+
|
| 714 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 715 |
+
//
|
| 716 |
+
// MMA functions for shape m16n16k8
|
| 717 |
+
//
|
| 718 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) __DEF_IF_HOST
|
| 719 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) __DEF_IF_HOST
|
| 720 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) __DEF_IF_HOST
|
| 721 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) __DEF_IF_HOST
|
| 722 |
+
|
| 723 |
+
//
|
| 724 |
+
// MMA functions for shape m8n8k4
|
| 725 |
+
//
|
| 726 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, row_major>& a, const fragment<matrix_b, 8, 8, 4, double, col_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) __DEF_IF_HOST
|
| 727 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, col_major>& a, const fragment<matrix_b, 8, 8, 4, double, col_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) __DEF_IF_HOST
|
| 728 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, row_major>& a, const fragment<matrix_b, 8, 8, 4, double, row_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) __DEF_IF_HOST
|
| 729 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, col_major>& a, const fragment<matrix_b, 8, 8, 4, double, row_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) __DEF_IF_HOST
|
| 730 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 731 |
+
};
|
| 732 |
+
};
|
| 733 |
+
|
| 734 |
+
#undef __DEF_IF_HOST
|
| 735 |
+
#undef __CUDA_IMMA__
|
| 736 |
+
#undef __CUDA_SUBBYTE_IMMA__
|
| 737 |
+
#undef __CUDA_AMPERE_MMA__
|
| 738 |
+
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */
|
| 739 |
+
|
| 740 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 741 |
+
|
| 742 |
+
#undef __CUDA_MMA_DEVICE_DECL__
|
| 743 |
+
|
| 744 |
+
#if defined(__CUDA_ARCH__)
|
| 745 |
+
#include "mma.hpp"
|
| 746 |
+
#endif /* defined(__CUDA_ARCH__) */
|
| 747 |
+
|
| 748 |
+
|
| 749 |
+
#endif /* !__CUDA_MMA_H__ */
|
| 750 |
+
|
| 751 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__)
|
| 752 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 753 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H__
|
| 754 |
+
#endif
|
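The declarations above complete the public nvcuda::wmma surface of crt/mma.h: a warp cooperatively loads matrix_a, matrix_b, and accumulator fragments, multiply-accumulates them with mma_sync (or bmma_sync for 1-bit fragments), and writes the accumulator back with store_matrix_sync. As a minimal sketch of how this API is driven from a kernel, assuming standard CUDA toolkit semantics (the kernel name and single-tile setup below are illustrative, not part of this header):

    // Illustrative only. Each warp computes one 16x16 tile of C = A*B, with
    // A and B in row-major __half and C accumulated in float; fill_fragment,
    // load_matrix_sync, mma_sync, and store_matrix_sync are the entry points
    // declared in mma.h.
    #include <mma.h>
    #include <cuda_fp16.h>
    using namespace nvcuda;

    __global__ void wmma_tile_kernel(const __half* a, const __half* b, float* c) {
        wmma::fragment<wmma::matrix_a, 16, 16, 16, __half, wmma::row_major> a_frag;
        wmma::fragment<wmma::matrix_b, 16, 16, 16, __half, wmma::row_major> b_frag;
        wmma::fragment<wmma::accumulator, 16, 16, 16, float> c_frag;

        wmma::fill_fragment(c_frag, 0.0f);      // zero the accumulator tile
        wmma::load_matrix_sync(a_frag, a, 16);  // ldm = 16: row stride of the tile
        wmma::load_matrix_sync(b_frag, b, 16);
        wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
        wmma::store_matrix_sync(c, c_frag, 16, wmma::mem_row_major);
    }

Launched with at least one full warp (e.g. wmma_tile_kernel<<<1, 32>>>(dA, dB, dC)) and compiled for sm_70 or newer, matching the __CUDA_ARCH__ >= 700 guard above.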
miniCUDA124/include/crt/mma.hpp
ADDED
@@ -0,0 +1,1128 @@
| 1 |
+
/*
|
| 2 |
+
* Copyright 2017-2020 NVIDIA Corporation. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NOTICE TO LICENSEE:
|
| 5 |
+
*
|
| 6 |
+
* This source code and/or documentation ("Licensed Deliverables") are
|
| 7 |
+
* subject to NVIDIA intellectual property rights under U.S. and
|
| 8 |
+
* international Copyright laws.
|
| 9 |
+
*
|
| 10 |
+
* These Licensed Deliverables contained herein is PROPRIETARY and
|
| 11 |
+
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
|
| 12 |
+
* conditions of a form of NVIDIA software license agreement by and
|
| 13 |
+
* between NVIDIA and Licensee ("License Agreement") or electronically
|
| 14 |
+
* accepted by Licensee. Notwithstanding any terms or conditions to
|
| 15 |
+
* the contrary in the License Agreement, reproduction or disclosure
|
| 16 |
+
* of the Licensed Deliverables to any third party without the express
|
| 17 |
+
* written consent of NVIDIA is prohibited.
|
| 18 |
+
*
|
| 19 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 20 |
+
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
|
| 21 |
+
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
|
| 22 |
+
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
|
| 23 |
+
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
|
| 24 |
+
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
|
| 25 |
+
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
|
| 26 |
+
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
|
| 27 |
+
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
|
| 28 |
+
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
|
| 29 |
+
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
| 30 |
+
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
| 31 |
+
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
| 32 |
+
* OF THESE LICENSED DELIVERABLES.
|
| 33 |
+
*
|
| 34 |
+
* U.S. Government End Users. These Licensed Deliverables are a
|
| 35 |
+
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
|
| 36 |
+
* 1995), consisting of "commercial computer software" and "commercial
|
| 37 |
+
* computer software documentation" as such terms are used in 48
|
| 38 |
+
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
|
| 39 |
+
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
|
| 40 |
+
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
|
| 41 |
+
* U.S. Government End Users acquire the Licensed Deliverables with
|
| 42 |
+
* only those rights set forth herein.
|
| 43 |
+
*
|
| 44 |
+
* Any use of the Licensed Deliverables in individual and commercial
|
| 45 |
+
* software must include, in the user documentation and internal
|
| 46 |
+
* comments to the code, the above Disclaimer and U.S. Government End
|
| 47 |
+
* Users Notice.
|
| 48 |
+
*/
|
| 49 |
+
|
| 50 |
+
#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
|
| 51 |
+
#if defined(_MSC_VER)
|
| 52 |
+
#pragma message("crt/mma.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
|
| 53 |
+
#else
|
| 54 |
+
#warning "crt/mma.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
|
| 55 |
+
#endif
|
| 56 |
+
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 57 |
+
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__
|
| 58 |
+
#endif
|
| 59 |
+
|
| 60 |
+
#if !defined(__CUDA_MMA_HPP__)
|
| 61 |
+
#define __CUDA_MMA_HPP__
|
| 62 |
+
|
| 63 |
+
#if defined(__cplusplus) && defined(__CUDACC__)
|
| 64 |
+
|
| 65 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700
|
| 66 |
+
|
| 67 |
+
#include <cuda_fp16.h>
|
| 68 |
+
#include <cuda_bf16.h>
|
| 69 |
+
|
| 70 |
+
#define __CUDA_MMA_DEVICE_DECL__ static __device__ __inline__
|
| 71 |
+
|
| 72 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720
|
| 73 |
+
#define __CUDA_IMMA__ 1
|
| 74 |
+
#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 720 */
|
| 75 |
+
|
| 76 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730
|
| 77 |
+
#define __CUDA_SUBBYTE_IMMA__ 1
|
| 78 |
+
#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 730 */
|
| 79 |
+
|
| 80 |
+
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
|
| 81 |
+
#define __CUDA_AMPERE_MMA__ 1
|
| 82 |
+
#endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800 */
|
| 83 |
+
|
| 84 |
+
namespace nvcuda {
|
| 85 |
+
namespace wmma {
|
| 86 |
+
|
| 87 |
+
//
|
| 88 |
+
// Load functions for frags of shape m16n16k16
|
| 89 |
+
//
|
| 90 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
|
| 91 |
+
__hmma_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 0);
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
|
| 95 |
+
__hmma_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 1);
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b,16, 16, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
|
| 99 |
+
__hmma_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 0);
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b,16, 16, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
|
| 103 |
+
__hmma_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 1);
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator,16, 16, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) {
|
| 107 |
+
if (layout == mem_row_major)
|
| 108 |
+
__hmma_m16n16k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0);
|
| 109 |
+
else
|
| 110 |
+
__hmma_m16n16k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1);
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator,16, 16, 16, float>& a, const float* p, unsigned ldm, layout_t layout) {
|
| 114 |
+
if (layout == mem_row_major)
|
| 115 |
+
__hmma_m16n16k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0);
|
| 116 |
+
else
|
| 117 |
+
__hmma_m16n16k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1);
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
#ifdef __CUDA_IMMA__
|
| 121 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
|
| 122 |
+
__imma_m16n16k16_ld_a_s8((int *)&a, (const int *)p, ldm, 0);
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
|
| 126 |
+
__imma_m16n16k16_ld_a_s8((int *)&a, (const int *)p, ldm, 1);
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
|
| 130 |
+
__imma_m16n16k16_ld_a_u8((int *)&a, (const int *)p, ldm, 0);
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
|
| 134 |
+
__imma_m16n16k16_ld_a_u8((int *)&a, (const int *)p, ldm, 1);
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
|
| 138 |
+
__imma_m16n16k16_ld_b_s8((int *)&a, (const int *)p, ldm, 0);
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
|
| 142 |
+
__imma_m16n16k16_ld_b_s8((int *)&a, (const int *)p, ldm, 1);
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
|
| 146 |
+
__imma_m16n16k16_ld_b_u8((int *)&a, (const int *)p, ldm, 0);
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
|
| 150 |
+
__imma_m16n16k16_ld_b_u8((int *)&a, (const int *)p, ldm, 1);
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator,16, 16, 16, int>& a, const int* p, unsigned ldm, layout_t layout) {
|
| 154 |
+
if (layout == mem_row_major)
|
| 155 |
+
__imma_m16n16k16_ld_c((int *)&a, (const int*)p, ldm, 0);
|
| 156 |
+
else
|
| 157 |
+
__imma_m16n16k16_ld_c((int *)&a, (const int*)p, ldm, 1);
|
| 158 |
+
}
|
| 159 |
+
#endif /* __CUDA_IMMA__ */
|
| 160 |
+
|
| 161 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 162 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
|
| 163 |
+
__mma_bf16_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 0);
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
|
| 167 |
+
__mma_bf16_m16n16k16_ld_a((int*)&a, (const int*)p, ldm, 1);
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
|
| 171 |
+
__mma_bf16_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 0);
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
|
| 175 |
+
__mma_bf16_m16n16k16_ld_b((int*)&a, (const int*)p, ldm, 1);
|
| 176 |
+
}
|
| 177 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 178 |
+

//
// Load functions for frags of shape m32n8k16
//
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
  __hmma_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
  __hmma_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
  __hmma_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
  __hmma_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __hmma_m32n8k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0);
  else
    __hmma_m32n8k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, float>& a, const float* p, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __hmma_m32n8k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0);
  else
    __hmma_m32n8k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1);
}

#ifdef __CUDA_IMMA__
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
  __imma_m32n8k16_ld_a_s8((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
  __imma_m32n8k16_ld_a_s8((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m32n8k16_ld_a_u8((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m32n8k16_ld_a_u8((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
  __imma_m32n8k16_ld_b_s8((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
  __imma_m32n8k16_ld_b_s8((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m32n8k16_ld_b_u8((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m32n8k16_ld_b_u8((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 32, 8, 16, int>& a, const int* p, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __imma_m32n8k16_ld_c((int *)&a, (const int*)p, ldm, 0);
  else
    __imma_m32n8k16_ld_c((int *)&a, (const int*)p, ldm, 1);
}
#endif /* __CUDA_IMMA__ */

#ifdef __CUDA_AMPERE_MMA__
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m32n8k16_ld_a((int*)&a, (const int*)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m32n8k16_ld_b((int*)&a, (const int*)p, ldm, 1);
}
#endif /* __CUDA_AMPERE_MMA__ */


//
// Load functions for frags of shape m8n32k16
//
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
  __hmma_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
  __hmma_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __half, row_major>& a, const __half* p, unsigned ldm) {
  __hmma_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __half, col_major>& a, const __half* p, unsigned ldm) {
  __hmma_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, __half>& a, const __half* p, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __hmma_m8n32k16_ld_c_f16((int*)&a, (const int*)p, ldm, 0);
  else
    __hmma_m8n32k16_ld_c_f16((int*)&a, (const int*)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, float>& a, const float* p, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __hmma_m8n32k16_ld_c_f32((float*)&a, (const float*)p, ldm, 0);
  else
    __hmma_m8n32k16_ld_c_f32((float*)&a, (const float*)p, ldm, 1);
}

#ifdef __CUDA_IMMA__
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
  __imma_m8n32k16_ld_a_s8((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
  __imma_m8n32k16_ld_a_s8((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m8n32k16_ld_a_u8((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m8n32k16_ld_a_u8((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, signed char, row_major>& a, const signed char* p, unsigned ldm) {
  __imma_m8n32k16_ld_b_s8((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, signed char, col_major>& a, const signed char* p, unsigned ldm) {
  __imma_m8n32k16_ld_b_s8((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m8n32k16_ld_b_u8((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& a, const unsigned char* p, unsigned ldm) {
  __imma_m8n32k16_ld_b_u8((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 32, 16, int>& a, const int* p, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __imma_m8n32k16_ld_c((int *)&a, (const int*)p, ldm, 0);
  else
    __imma_m8n32k16_ld_c((int *)&a, (const int*)p, ldm, 1);
}
#endif /* __CUDA_IMMA__ */

#ifdef __CUDA_AMPERE_MMA__
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m8n32k16_ld_a((int*)&a, (const int*)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& a, const __nv_bfloat16* p, unsigned ldm) {
  __mma_bf16_m8n32k16_ld_b((int*)&a, (const int*)p, ldm, 1);
}
#endif /* __CUDA_AMPERE_MMA__ */


#ifdef __CUDA_SUBBYTE_IMMA__
//
// Load functions for frags of shape m8n8k32
//
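// The sub-byte (s4/u4) and single-bit (b1) fragments in this block take
// const void* source pointers: elements are packed several to a 32-bit word
// (eight 4-bit values, or thirty-two 1-bit values, per word), so there is no
// addressable element type to point at.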
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 32, experimental::precision::s4, row_major>& a, const void* p, unsigned ldm) {
  __imma_m8n8k32_ld_a_s4((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 32, experimental::precision::u4, row_major>& a, const void* p, unsigned ldm) {
  __imma_m8n8k32_ld_a_u4((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 32, experimental::precision::s4, col_major>& a, const void* p, unsigned ldm) {
  __imma_m8n8k32_ld_b_s4((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 32, experimental::precision::u4, col_major>& a, const void* p, unsigned ldm) {
  __imma_m8n8k32_ld_b_u4((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 32, int>& a, const int* p, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __imma_m8n8k32_ld_c((int *)&a, (const int*)p, ldm, 0);
  else
    __imma_m8n8k32_ld_c((int *)&a, (const int*)p, ldm, 1);
}

//
// Load functions for frags of shape m8n8k128
//
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 128, experimental::precision::b1, row_major>& a, const void* p, unsigned ldm) {
  __bmma_m8n8k128_ld_a_b1((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 128, experimental::precision::b1, col_major>& a, const void* p, unsigned ldm) {
  __bmma_m8n8k128_ld_b_b1((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 128, int>& a, const int* p, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __bmma_m8n8k128_ld_c((int *)&a, (const int*)p, ldm, 0);
  else
    __bmma_m8n8k128_ld_c((int *)&a, (const int*)p, ldm, 1);
}
#endif /* __CUDA_SUBBYTE_IMMA__ */


#ifdef __CUDA_AMPERE_MMA__
// load functions for frags of shape m16n16k8
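// The tf32 fragments below are addressed through float pointers: tf32 is a
// truncated 19-bit format (1 sign, 8 exponent, 10 mantissa bits) that lives in
// 32-bit float storage, so the source data stays float-typed while the
// fragment carries precision::tf32 elements.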
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const float* p, unsigned ldm) {
  __mma_tf32_m16n16k8_ld_a((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const float* p, unsigned ldm) {
  __mma_tf32_m16n16k8_ld_a((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& a, const float* p, unsigned ldm) {
  __mma_tf32_m16n16k8_ld_b((int *)&a, (const int *)p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& a, const float* p, unsigned ldm) {
  __mma_tf32_m16n16k8_ld_b((int *)&a, (const int *)p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 16, 16, 8, float>& a, const float* p, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __mma_tf32_m16n16k8_ld_c((float *)&a, p, ldm, 0);
  else
    __mma_tf32_m16n16k8_ld_c((float *)&a, p, ldm, 1);
}

// load functions for frags of shape m8n8k4
__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 4, double, row_major>& a, const double* p, unsigned ldm) {
  __dmma_m8n8k4_ld_a((double *)&a, p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_a, 8, 8, 4, double, col_major>& a, const double* p, unsigned ldm) {
  __dmma_m8n8k4_ld_a((double *)&a, p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 4, double, row_major>& a, const double* p, unsigned ldm) {
  __dmma_m8n8k4_ld_b((double *)&a, p, ldm, 0);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<matrix_b, 8, 8, 4, double, col_major>& a, const double* p, unsigned ldm) {
  __dmma_m8n8k4_ld_b((double *)&a, p, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void load_matrix_sync(fragment<accumulator, 8, 8, 4, double>& a, const double* p, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __dmma_m8n8k4_ld_c((double *)&a, p, ldm, 0);
  else
    __dmma_m8n8k4_ld_c((double *)&a, p, ldm, 1);
}
#endif /* __CUDA_AMPERE_MMA__ */
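
// The __CUDA_AMPERE_MMA__ paths above (bf16, tf32, and the double m8n8k4
// shape) wrap tensor-core data paths introduced with the Ampere generation
// (sm_80 and later); earlier architectures only expose the __half and integer
// variants.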

//
// Store functions for frags of shape m16n16k16
//
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator,16, 16, 16, __half>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __hmma_m16n16k16_st_c_f16((int*)p, (int*)&a, ldm, 0);
  else
    __hmma_m16n16k16_st_c_f16((int*)p, (int*)&a, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator,16, 16, 16, float>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __hmma_m16n16k16_st_c_f32((float*)p, (float*)&a, ldm, 0);
  else
    __hmma_m16n16k16_st_c_f32((float*)p, (float*)&a, ldm, 1);
}

#ifdef __CUDA_IMMA__
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator,16, 16, 16, int>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __imma_m16n16k16_st_c_i32(p, (const int*)&a, ldm, 0);
  else
    __imma_m16n16k16_st_c_i32(p, (const int*)&a, ldm, 1);
}
#endif /* __CUDA_IMMA__ */

//
// Store functions for frags of shape m32n8k16
//
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator, 32, 8, 16, __half>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __hmma_m32n8k16_st_c_f16((int*)p, (int*)&a, ldm, 0);
  else
    __hmma_m32n8k16_st_c_f16((int*)p, (int*)&a, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 32, 8, 16, float>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __hmma_m32n8k16_st_c_f32((float*)p, (float*)&a, ldm, 0);
  else
    __hmma_m32n8k16_st_c_f32((float*)p, (float*)&a, ldm, 1);
}

#ifdef __CUDA_IMMA__
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 32, 8, 16, int>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __imma_m32n8k16_st_c_i32(p, (const int*)&a, ldm, 0);
  else
    __imma_m32n8k16_st_c_i32(p, (const int*)&a, ldm, 1);
}
#endif /* __CUDA_IMMA__ */

//
// Store functions for frags of shape m8n32k16
//
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(__half *p, const fragment<accumulator, 8, 32, 16, __half>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __hmma_m8n32k16_st_c_f16((int*)p, (int*)&a, ldm, 0);
  else
    __hmma_m8n32k16_st_c_f16((int*)p, (int*)&a, ldm, 1);
}

__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 8, 32, 16, float>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __hmma_m8n32k16_st_c_f32((float*)p, (float*)&a, ldm, 0);
  else
    __hmma_m8n32k16_st_c_f32((float*)p, (float*)&a, ldm, 1);
}

#ifdef __CUDA_IMMA__
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 32, 16, int>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __imma_m8n32k16_st_c_i32(p, (const int*)&a, ldm, 0);
  else
    __imma_m8n32k16_st_c_i32(p, (const int*)&a, ldm, 1);
}
#endif /* __CUDA_IMMA__ */

#ifdef __CUDA_SUBBYTE_IMMA__
//
// Store functions for frags of shape m8n8k32
//
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 8, 32, int>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __imma_m8n8k32_st_c_i32(p, (const int*)&a, ldm, 0);
  else
    __imma_m8n8k32_st_c_i32(p, (const int*)&a, ldm, 1);
}

//
// Store functions for frags of shape m8n8k128
//
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(int *p, const fragment<accumulator, 8, 8, 128, int>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __bmma_m8n8k128_st_c_i32(p, (const int*)&a, ldm, 0);
  else
    __bmma_m8n8k128_st_c_i32(p, (const int*)&a, ldm, 1);
}
#endif /* __CUDA_SUBBYTE_IMMA__ */


#ifdef __CUDA_AMPERE_MMA__

//
// Store functions for frags of shape m16n16k8
//
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(float *p, const fragment<accumulator, 16, 16, 8, float>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __mma_m16n16k8_st_c_f32(p, (const float*)&a, ldm, 0);
  else
    __mma_m16n16k8_st_c_f32(p, (const float*)&a, ldm, 1);
}


//
// Store functions for frags of shape m8n8k4
//
__CUDA_MMA_DEVICE_DECL__ void store_matrix_sync(double *p, const fragment<accumulator, 8, 8, 4, double>& a, unsigned ldm, layout_t layout) {
  if (layout == mem_row_major)
    __dmma_m8n8k4_st_c_f64(p, (const double*)&a, ldm, 0);
  else
    __dmma_m8n8k4_st_c_f64(p, (const double*)&a, ldm, 1);
}
#endif /* __CUDA_AMPERE_MMA__ */
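
// Unlike matrix_a/matrix_b fragments, whose layout is fixed at compile time by
// the row_major/col_major template tag, accumulator fragments carry no layout
// tag; the memory layout is chosen at the call site through the runtime
// layout_t argument of load_matrix_sync/store_matrix_sync.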

//
// MMA functions for shape m16n16k16
//
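// In the __*_mma_* builtins below, the second-to-last argument packs both
// operand layouts into one value, (aLayout << 1) | bLayout with row_major = 0
// and col_major = 1 (so row/col -> 1, col/col -> 3, row/row -> 0,
// col/row -> 2); the last argument is a saturation flag, fixed at 0 except
// where the overload exposes a bool satf parameter.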
// D fp16, C fp16
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
  __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
  __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
  __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
  __hmma_m16n16k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
}

// D fp32, C fp16
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
  __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
  __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
  __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, __half>& c) {
  __hmma_m16n16k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
}

// D fp32, C fp32
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __hmma_m16n16k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
}

// D fp16, C fp32
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, row_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, __half>& d, const fragment<matrix_a, 16, 16, 16, __half, col_major>& a, const fragment<matrix_b,16, 16, 16, __half, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __hmma_m16n16k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
}

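// Usage sketch: how a kernel typically reaches the overloads above through the
// public nvcuda::wmma API. The kernel name and leading dimensions are
// illustrative only; the kernel must be launched with at least one full warp,
// since all 32 threads cooperate on one 16x16 tile.
//
//   #include <mma.h>
//   using namespace nvcuda;
//
//   __global__ void wmma_gemm_tile(const half *A, const half *B, float *C,
//                                  int lda, int ldb, int ldc) {
//     wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag;
//     wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> b_frag;
//     wmma::fragment<wmma::accumulator, 16, 16, 16, float> c_frag;
//     wmma::fill_fragment(c_frag, 0.0f);               // zero the accumulator
//     wmma::load_matrix_sync(a_frag, A, lda);          // row_major load_matrix_sync overload
//     wmma::load_matrix_sync(b_frag, B, ldb);          // col_major load_matrix_sync overload
//     wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);  // f32f32 row/col case, layout code 1
//     wmma::store_matrix_sync(C, c_frag, ldc, wmma::mem_row_major);
//   }
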
#ifdef __CUDA_IMMA__
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const fragment<matrix_b,16, 16, 16, signed char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 1);
  else
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const fragment<matrix_b,16, 16, 16, signed char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 1);
  else
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, row_major>& a, const fragment<matrix_b,16, 16, 16, signed char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 1);
  else
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, signed char, col_major>& a, const fragment<matrix_b,16, 16, 16, signed char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 1);
  else
    __imma_m16n16k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 1);
  else
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, col_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 1);
  else
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, row_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 1);
  else
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, int>& d, const fragment<matrix_a, 16, 16, 16, unsigned char, col_major>& a, const fragment<matrix_b,16, 16, 16, unsigned char, row_major>& b, const fragment<accumulator,16, 16, 16, int>& c, bool satf) {
  if (satf)
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 1);
  else
    __imma_m16n16k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int *)&c, 2, 0);
}
#endif /* __CUDA_IMMA__ */
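
// For these integer variants the trailing flag forwards satf to the hardware;
// judging by the name (saturate-to-finite), a set flag clamps results at the
// limits of the int accumulator range instead of letting them wrap on overflow.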

#ifdef __CUDA_AMPERE_MMA__
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,16, 16, 16, float>& d, const fragment<matrix_a, 16, 16, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b,16, 16, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator,16, 16, 16, float>& c) {
  __mma_bf16_m16n16k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
}
#endif /* __CUDA_AMPERE_MMA__ */


//
// MMA functions for shape m32n8k16
//
// D fp16, C fp16
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
  __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
  __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
  __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
  __hmma_m32n8k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
}

// D fp32, C fp16
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
  __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
  __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
  __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, __half>& c) {
  __hmma_m32n8k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
}

// D fp32, C fp32
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
  __hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
  __hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
  __hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
  __hmma_m32n8k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
}

// D fp16, C fp32
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
  __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, col_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
  __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, row_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
  __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,32, 8, 16, __half>& d, const fragment<matrix_a, 32, 8, 16, __half, col_major>& a, const fragment<matrix_b,32, 8, 16, __half, row_major>& b, const fragment<accumulator,32, 8, 16, float>& c) {
  __hmma_m32n8k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
}

#ifdef __CUDA_IMMA__
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
  else
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1);
  else
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, row_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1);
  else
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, signed char, col_major>& a, const fragment<matrix_b, 32, 8, 16, signed char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1);
  else
    __imma_m32n8k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
  else
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, col_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1);
  else
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, row_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1);
  else
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, int>& d, const fragment<matrix_a, 32, 8, 16, unsigned char, col_major>& a, const fragment<matrix_b, 32, 8, 16, unsigned char, row_major>& b, const fragment<accumulator, 32, 8, 16, int>& c, bool satf) {
  if (satf)
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1);
  else
    __imma_m32n8k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0);
}
#endif /* __CUDA_IMMA__ */

#ifdef __CUDA_AMPERE_MMA__
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) {
  __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) {
  __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) {
  __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 32, 8, 16, float>& d, const fragment<matrix_a, 32, 8, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 32, 8, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 32, 8, 16, float>& c) {
  __mma_bf16_m32n8k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
}
#endif /* __CUDA_AMPERE_MMA__ */

//
// MMA functions for shape m8n32k16
//
// D fp16, C fp16
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
  __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
  __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
  __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
  __hmma_m8n32k16_mma_f16f16((int*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
}

// D fp32, C fp16
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
  __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
  __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
  __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, __half>& c) {
  __hmma_m8n32k16_mma_f32f16((float*)&d, (const int*)&a, (const int*)&b, (const int*)&c, 2, 0);
}

// D fp32, C fp32
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
  __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
  __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
  __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
  __hmma_m8n32k16_mma_f32f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
}

// D fp16, C fp32
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
  __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, col_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
  __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, row_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
  __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator,8, 32, 16, __half>& d, const fragment<matrix_a, 8, 32, 16, __half, col_major>& a, const fragment<matrix_b,8, 32, 16, __half, row_major>& b, const fragment<accumulator,8, 32, 16, float>& c) {
  __hmma_m8n32k16_mma_f16f32((int*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
}

#ifdef __CUDA_IMMA__
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
  if (satf)
    __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
  else
    __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
  if (satf)
    __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1);
  else
    __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, row_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
  if (satf)
    __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1);
  else
    __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, signed char, col_major>& a, const fragment<matrix_b, 8, 32, 16, signed char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
  if (satf)
    __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1);
  else
    __imma_m8n32k16_mma_s8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
  if (satf)
    __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
  else
    __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, col_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
  if (satf)
    __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 1);
  else
    __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, row_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
  if (satf)
    __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 1);
  else
    __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, int>& d, const fragment<matrix_a, 8, 32, 16, unsigned char, col_major>& a, const fragment<matrix_b, 8, 32, 16, unsigned char, row_major>& b, const fragment<accumulator, 8, 32, 16, int>& c, bool satf) {
  if (satf)
    __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 1);
  else
    __imma_m8n32k16_mma_u8((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 2, 0);
}
#endif /* __CUDA_IMMA__ */

#ifdef __CUDA_AMPERE_MMA__
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) {
  __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, col_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) {
  __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, row_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) {
  __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
}

__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 32, 16, float>& d, const fragment<matrix_a, 8, 32, 16, __nv_bfloat16, col_major>& a, const fragment<matrix_b, 8, 32, 16, __nv_bfloat16, row_major>& b, const fragment<accumulator, 8, 32, 16, float>& c) {
  __mma_bf16_m8n32k16_mma_f32((float*)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
}
#endif /* __CUDA_AMPERE_MMA__ */


#ifdef __CUDA_SUBBYTE_IMMA__
//
// MMA functions for shape m8n8k32
//
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 32, int>& d, const fragment<matrix_a, 8, 8, 32, experimental::precision::s4, row_major>& a, const fragment<matrix_b, 8, 8, 32, experimental::precision::s4, col_major>& b, const fragment<accumulator, 8, 8, 32, int>& c, bool satf) {
|
| 1037 |
+
if (satf)
|
| 1038 |
+
__imma_m8n8k32_mma_s4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
|
| 1039 |
+
else
|
| 1040 |
+
__imma_m8n8k32_mma_s4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
|
| 1041 |
+
}
|
| 1042 |
+
|
| 1043 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 32, int>& d, const fragment<matrix_a, 8, 8, 32, experimental::precision::u4, row_major>& a, const fragment<matrix_b, 8, 8, 32, experimental::precision::u4, col_major>& b, const fragment<accumulator, 8, 8, 32, int>& c, bool satf) {
|
| 1044 |
+
if (satf)
|
| 1045 |
+
__imma_m8n8k32_mma_u4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 1);
|
| 1046 |
+
else
|
| 1047 |
+
__imma_m8n8k32_mma_u4((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1, 0);
|
| 1048 |
+
}
|
| 1049 |
+
|
| 1050 |
+
//
|
| 1051 |
+
// MMA functions for shape m8n8k128
|
| 1052 |
+
//
|
| 1053 |
+
__CUDA_MMA_DEVICE_DECL__ void bmma_sync(fragment<accumulator, 8, 8, 128, int>& d, const fragment<matrix_a, 8, 8, 128, experimental::precision::b1, row_major>& a, const fragment<matrix_b, 8, 8, 128, experimental::precision::b1, col_major>& b, const fragment<accumulator, 8, 8, 128, int>& c,
|
| 1054 |
+
experimental::bmmaBitOp op, experimental::bmmaAccumulateOp)
|
| 1055 |
+
{
|
| 1056 |
+
|
| 1057 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 1058 |
+
if (op == experimental::bmmaBitOpAND)
|
| 1059 |
+
__bmma_m8n8k128_mma_and_popc_b1((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1);
|
| 1060 |
+
else
|
| 1061 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 1062 |
+
__bmma_m8n8k128_mma_xor_popc_b1((int*)&d, (const int *)&a, (const int *)&b, (const int*)&c, 1);
|
| 1063 |
+
}
|
| 1064 |
+
|
| 1065 |
+
|
| 1066 |
+
#endif /* __CUDA_SUBBYTE_IMMA__ */
|
| 1067 |
+
|
| 1068 |
+
#ifdef __CUDA_AMPERE_MMA__
|
| 1069 |
+
//
|
| 1070 |
+
// MMA functions for shape m16n16k8
|
| 1071 |
+
//
|
| 1072 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) {
|
| 1073 |
+
__mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 1, 0);
|
| 1074 |
+
}
|
| 1075 |
+
|
| 1076 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, col_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) {
|
| 1077 |
+
__mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 3, 0);
|
| 1078 |
+
}
|
| 1079 |
+
|
| 1080 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, row_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) {
|
| 1081 |
+
__mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 0, 0);
|
| 1082 |
+
}
|
| 1083 |
+
|
| 1084 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 16, 16, 8, float>& d, const fragment<matrix_a, 16, 16, 8, precision::tf32, col_major>& a, const fragment<matrix_b, 16, 16, 8, precision::tf32, row_major>& b, const fragment<accumulator, 16, 16, 8, float>& c) {
|
| 1085 |
+
__mma_tf32_m16n16k8_mma_f32((float *)&d, (const int*)&a, (const int*)&b, (const float*)&c, 2, 0);
|
| 1086 |
+
}
|
| 1087 |
+
|
| 1088 |
+
|
| 1089 |
+
//
|
| 1090 |
+
// MMA functions for shape m8n8k4
|
| 1091 |
+
//
|
| 1092 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, row_major>& a, const fragment<matrix_b, 8, 8, 4, double, col_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) {
|
| 1093 |
+
__dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 1, 0);
|
| 1094 |
+
}
|
| 1095 |
+
|
| 1096 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, col_major>& a, const fragment<matrix_b, 8, 8, 4, double, col_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) {
|
| 1097 |
+
__dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 3, 0);
|
| 1098 |
+
}
|
| 1099 |
+
|
| 1100 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, row_major>& a, const fragment<matrix_b, 8, 8, 4, double, row_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) {
|
| 1101 |
+
__dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 0, 0);
|
| 1102 |
+
}
|
| 1103 |
+
|
| 1104 |
+
__CUDA_MMA_DEVICE_DECL__ void mma_sync(fragment<accumulator, 8, 8, 4, double>& d, const fragment<matrix_a, 8, 8, 4, double, col_major>& a, const fragment<matrix_b, 8, 8, 4, double, row_major>& b, const fragment<accumulator, 8, 8, 4, double>& c) {
|
| 1105 |
+
__dmma_m8n8k4_mma_f64((double *)&d, (const double*)&a, (const double*)&b, (const double*)&c, 2, 0);
|
| 1106 |
+
}
|
| 1107 |
+
|
| 1108 |
+
#endif /* __CUDA_AMPERE_MMA__ */
|
| 1109 |
+
|
| 1110 |
+
};
|
| 1111 |
+
};
|
| 1112 |
+
|
| 1113 |
+
#undef __CUDA_IMMA__
|
| 1114 |
+
#undef __CUDA_SUBBYTE_IMMA__
|
| 1115 |
+
#undef __CUDA_MMA_DEVICE_DECL__
|
| 1116 |
+
#undef __CUDA_AMPERE_MMA__
|
| 1117 |
+
|
| 1118 |
+
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */
|
| 1119 |
+
|
| 1120 |
+
#endif /* __cplusplus && __CUDACC__ */
|
| 1121 |
+
|
| 1122 |
+
|
| 1123 |
+
#endif /* __CUDA_MMA_HPP__ */
|
| 1124 |
+
|
| 1125 |
+
#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__)
|
| 1126 |
+
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
|
| 1127 |
+
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_HPP__
|
| 1128 |
+
#endif
|
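Note on the flag arguments: across the overloads above, the first trailing integer encodes the fragment layouts (bit 1 set when matrix_a is col_major, bit 0 set when matrix_b is col_major, so row/row = 0, row/col = 1, col/row = 2, col/col = 3), and the second carries the satf saturation flag on the integer paths. These builtins are reached through the public nvcuda::wmma API rather than called directly. A minimal sketch of that path for the m8n32k16 half-precision shape; the kernel name and the assumption of densely packed 8x16, 16x32, and 8x32 tiles are illustrative, not taken from the header:

#include <mma.h>
using namespace nvcuda;

// Hedged sketch: one warp computes an 8x32 float tile D = A*B (+0) via the
// public wmma API, which dispatches to the __hmma_m8n32k16 builtins wrapped
// above. Pointers are assumed (for illustration) to address packed tiles.
__global__ void mma_m8n32k16_tile(const __half *a, const __half *b, float *d) {
    wmma::fragment<wmma::matrix_a, 8, 32, 16, __half, wmma::row_major> fa;
    wmma::fragment<wmma::matrix_b, 8, 32, 16, __half, wmma::col_major> fb;
    wmma::fragment<wmma::accumulator, 8, 32, 16, float> acc;

    wmma::fill_fragment(acc, 0.0f);       // C = 0
    wmma::load_matrix_sync(fa, a, 16);    // row-major 8x16 A, leading dim 16
    wmma::load_matrix_sync(fb, b, 16);    // col-major 16x32 B, leading dim 16
    wmma::mma_sync(acc, fa, fb, acc);     // row_major a, col_major b -> layout flag 1
    wmma::store_matrix_sync(d, acc, 32, wmma::mem_row_major);
}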
miniCUDA124/include/crt/nvfunctional
ADDED
@@ -0,0 +1,621 @@
/*
 * NVIDIA_COPYRIGHT_BEGIN
 *
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 *
 * NVIDIA_COPYRIGHT_END
 */

#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
#if defined(_MSC_VER)
#pragma message("crt/nvfunctional is an internal header file and must not be used directly. Please use nvfunctional instead.")
#else
#warning "crt/nvfunctional is an internal header file and must not be used directly. Please use nvfunctional instead."
#endif
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__
#endif

#ifndef __NV_LIBCXX_FUNCTIONAL_H__
#define __NV_LIBCXX_FUNCTIONAL_H__

#if __cplusplus < 201103L
#if defined(_MSC_VER)
#if _MSC_VER < 1800
#error This library requires VS 2013 and above
#endif /* _MSC_VER < 1800 */
#else /* !_MSC_VER */
#error This library requires support for the ISO C++ 2011 standard
#endif /* _MSC_VER */
#endif /* __cplusplus */

#if defined(_MSC_VER)
#define __NV_ALIGNOF __alignof
#define __NV_NOEXCEPT
#define __NV_CONSTEXPR
#else /* !_MSC_VER */
#define __NV_ALIGNOF alignof
#define __NV_NOEXCEPT noexcept
#define __NV_CONSTEXPR constexpr
#endif /* _MSC_VER */

#include <type_traits>
#include <cstddef>
#include <new>

// n3290 20.8
namespace nvstd
{

namespace internal {

// D.8.1 base (deprecated) [depr.base]
template <class _Arg, class _Result>
struct unary_function
{
  typedef _Arg argument_type;
  typedef _Result result_type;
};

template <class _Arg1, class _Arg2, class _Result>
struct binary_function
{
  typedef _Arg1 first_argument_type;
  typedef _Arg2 second_argument_type;
  typedef _Result result_type;
};

// move
template <class _T>
inline __device__ __host__
typename std::remove_reference<_T>::type&& move(_T&& __t) __NV_NOEXCEPT
{
  return static_cast<typename std::remove_reference<_T>::type&&>(__t);
}

// 20.2.2 swap [utility.swap]
// swap
template<class _T,
         class = typename std::enable_if<
           std::is_move_constructible<_T>::value &&
           std::is_move_assignable<_T>::value>::type>
inline __device__ __host__
void swap(_T& __a, _T& __b)
#if !defined(_MSC_VER)
noexcept(std::is_nothrow_move_constructible<_T>::value &&
         std::is_nothrow_move_assignable<_T>::value)
#endif /* !defined(_MSC_VER) */
{
  _T __t(internal::move(__a));
  __a = internal::move(__b);
  __b = internal::move(__t);
}

// 20.2.3 forward/move helpers [forward]
// forward
template <class _T>
inline __device__ __host__
_T&& forward(typename std::remove_reference<_T>::type& __t) __NV_NOEXCEPT
{
  return static_cast<_T&&>(__t);
}

template <class _T>
inline __device__ __host__
_T&& forward(typename std::remove_reference<_T>::type&& __t) __NV_NOEXCEPT
{
  static_assert(!std::is_lvalue_reference<_T>::value,
                "Error: __t is instantiated with an lvalue reference type");
  return static_cast<_T&&>(__t);
}

} // namespace internal

namespace __functional_helpers
{

struct __dummy_class;

// Store small functors locally:
// a functor is eligible for local storage if it is one of the following types:
//   * member object pointer;
//   * member function pointer;
//   * closure type of size less than or equal to the largest size of
//     the above types;
//   * function pointer;
//   * any callable class whose size is less than or equal to
//     the largest one of the above types;
union _Small_functor_types
{
  void *__obj;
  void (*__func_ptr)();
  void (__dummy_class::*mem_fn_ptr)();
};

struct _Small_functor_data {
  char __data[sizeof(_Small_functor_types)];
};

template <class _RetType, class ..._ArgTypes>
struct __maybe_base_function
{ };

template <class _RetType, class _T1>
struct __maybe_base_function<_RetType(_T1)>
  : public internal::unary_function<_T1, _RetType>
{ };

template <class _RetType, class _T1, class _T2>
struct __maybe_base_function<_RetType(_T1, _T2)>
  : public internal::binary_function<_T1, _T2, _RetType>
{ };

} // namespace __functional_helpers

// 20.8.11 Polymorphic function wrappers [func.wrap]

// 20.8.11.1 Class bad_function_call [func.wrap.badcall]
// unimplemented because it requires exception support
// class bad_function_call : public std::exception

// 20.8.11.2 Class template function [func.wrap.func]

template<class> class function; // undefined

// Simplified version of template class function, which
//   * does not support allocator_arg_t;
//   * does not support target and target_type that rely on RTTI
//   * does not throw bad_function_call exception on invoking a NULL target
template <class _RetType, class ..._ArgTypes>
class function<_RetType(_ArgTypes...)>
  : public __functional_helpers::__maybe_base_function<_RetType(_ArgTypes...)>
{
  __functional_helpers::_Small_functor_data __small_functor_data;
  void *__obj;
  typedef _RetType(*__meta_fn_type)(void *, _ArgTypes...);
  __meta_fn_type __meta_fn;
  typedef void(*__cloner_type)(function &, const function &);
  __cloner_type __cloner;
  typedef void(*__destructor_type)(function *);
  __destructor_type __destructor;

#pragma nv_exec_check_disable
  template <class _F>
  __device__ __host__
  __NV_CONSTEXPR bool __use_small_functor_data() const
  {
    return (sizeof(_F) <= sizeof(__small_functor_data) &&
            __NV_ALIGNOF(_F) <= __NV_ALIGNOF(
              __functional_helpers::_Small_functor_types));
  }

#pragma nv_exec_check_disable
  __device__ __host__
  void* __get_small_functor_data() const
  {
    return (void*)(&__small_functor_data.__data[0]);
  }

#pragma nv_exec_check_disable
  __device__ __host__
  bool __is_small_functor_data() const
  {
    return __obj == __get_small_functor_data();
  }

#pragma nv_exec_check_disable
  template <class _F>
  __device__ __host__
  static _F& __get_functor(void *__p)
  {
    return *((_F*)__p);
  }

#pragma nv_exec_check_disable
  template <class _F>
  __device__ __host__
  static bool __is_empty_functor(const _F& /*__p*/)
  {
    return false;
  }

#pragma nv_exec_check_disable
  template <class _F>
  __device__ __host__
  static bool __is_empty_functor(const _F* __p)
  {
    return !__p;
  }

#pragma nv_exec_check_disable
  template <class _Res, class _C>
  __device__ __host__
  static bool __is_empty_functor(const _Res _C::* __p)
  {
    return !__p;
  }

#pragma nv_exec_check_disable
  template <class _Res, class... _Args>
  __device__ __host__
  static bool __is_empty_functor(const function<_Res(_Args...)>& __p)
  {
    return !__p;
  }

  template <class _F>
  struct __make_cloner
  {
#pragma nv_exec_check_disable
    __device__ __host__
    static void __clone_data(function &__dest, const function &__src)
    {
      if (__dest.__use_small_functor_data<_F>()) {
        __dest.__obj = __dest.__get_small_functor_data();
        new (__dest.__obj) _F(__src.__get_functor<_F>(__src.__obj));
      }
      else {
        __dest.__obj = new _F(__src.__get_functor<_F>(__src.__obj));
      }
    }
  };

  template <class _F>
  struct __make_destructor
  {
#pragma nv_exec_check_disable
    __device__ __host__
    static void __destruct(function *__fn)
    {
      if (__fn->__use_small_functor_data<_F>()) {
        (__fn->__get_functor<_F>(__fn->__obj)).~_F();
      }
      else {
        delete (_F*)(__fn->__obj);
      }
    }
  };

  // We cannot simply define __make_functor in the following way:
  //   template <class _T, _F>
  //   __make_functor;
  //   template <class _RetType1, class _F, class... _ArgTypes1>
  //   struct __make_functor<_RetType1(_ArgTypes1...), _F>
  //
  // because VS 2013 cannot unpack _RetType1(_ArgTypes1...)
  template <class _RetType1, class _F, class... _ArgTypes1>
  struct __make_functor
  {
    typedef _RetType1 type;

#pragma nv_exec_check_disable
    __device__ __host__
    static _RetType1 __invoke(void *__d, _ArgTypes1... __args)
    {
      return __get_functor<_F>(__d)(
               internal::forward<_ArgTypes1>(__args)...);
    }
  };

  template <class _RetType1, class _C, class _M, class... _ArgTypes1>
  struct __make_functor<_RetType1, _M _C::*, _ArgTypes1...>
  {
    typedef _RetType1 type;
    typedef _RetType1(*_Fn)(_ArgTypes1...);

#pragma nv_exec_check_disable
    __device__ __host__
    static _RetType1 __invoke(void *__d, _ArgTypes1... __args)
    {
      return __get_functor<_Fn>(__d)(
               internal::forward<_ArgTypes1>(__args)...);
    }
  };

  // workaround for GCC versions below 4.8
#if (__GNUC__ == 4) && (__GNUC_MINOR__ < 8)
  template <class _F>
  struct __check_callability
    : public std::integral_constant<bool,
        !std::is_same<_F, std::nullptr_t>::value>
  { };
#elif defined(_MSC_VER)
  // simulate VC 2013's behavior...
  template <class _F>
  struct __check_callability1
    : public
      std::integral_constant<bool,
        // std::result_of does not handle member pointers well
        std::is_member_pointer<_F>::value ||
        std::is_convertible<
          _RetType,
          typename std::result_of<_F(_ArgTypes...)>::type
        >::value
      >
  { };

  template <class _F>
  struct __check_callability
    : public std::integral_constant<
        bool,
        !std::is_same<_F, function>::value &&
        __check_callability1<typename std::remove_cv<_F>::type>::value>
  { };
#else /* !((__GNUC__ == 4) && (__GNUC_MINOR__ < 8)) _MSC_VER */
  template <class _F,
            class _T = typename std::result_of<_F(_ArgTypes...)>::type>
  struct __check_callability
    : public std::integral_constant<
        bool,
        !std::is_same<_F, function>::value &&
        std::is_convertible<_T, _RetType>::value>
  { };
#endif /* (__GNUC__ == 4) && (__GNUC_MINOR__ < 8) */

#pragma nv_exec_check_disable
  __device__ __host__
  void __destroy()
  {
    if (__obj) {
      __destructor(this);
      __obj = 0;
    }
  }

#pragma nv_exec_check_disable
  __device__ __host__
  void __clear()
  {
    __obj = 0;
    __meta_fn = 0;
    __cloner = 0;
    __destructor = 0;
  }

public:
  typedef _RetType result_type;

  /*
   * These typedef(s) are derived from __maybe_base_function
   *   typedef T1 argument_type;        // only if sizeof...(ArgTypes) == 1 and
   *                                    // the type in ArgTypes is T1
   *   typedef T1 first_argument_type;  // only if sizeof...(ArgTypes) == 2 and
   *                                    // ArgTypes contains T1 and T2
   *   typedef T2 second_argument_type; // only if sizeof...(ArgTypes) == 2 and
   *                                    // ArgTypes contains T1 and T2
   */

  // 20.8.11.2.1 construct/copy/destroy [func.wrap.con]

#pragma nv_exec_check_disable
  __device__ __host__
  function() __NV_NOEXCEPT
    : __obj(0), __meta_fn(0), __cloner(0), __destructor(0) {}

#pragma nv_exec_check_disable
  __device__ __host__
  function(std::nullptr_t) __NV_NOEXCEPT
    : __obj(0), __meta_fn(0), __cloner(0), __destructor(0) {}

#pragma nv_exec_check_disable
  __device__ __host__
  function(const function &__fn)
  {
    if (__fn.__obj == 0) {
      __clear();
    }
    else {
      __meta_fn = __fn.__meta_fn;
      __destructor = __fn.__destructor;
      __fn.__cloner(*this, __fn);
      __cloner = __fn.__cloner;
    }
  }

#pragma nv_exec_check_disable
  __device__ __host__
  function(function &&__fn)
  {
    __fn.swap(*this);
  }

  // VS 2013 cannot process __check_callability type trait.
  // So, we check callability using static_assert instead of
  // using SFINAE such as
  //   template<class _F,
  //            class = typename std::enable_if<
  //              __check_callability<_F>::value
  //            >::type>

#pragma nv_exec_check_disable
  template<class _F>
  __device__ __host__
  function(_F);

  // copy and swap
#pragma nv_exec_check_disable
  __device__ __host__
  function& operator=(const function& __fn)
  {
    function(__fn).swap(*this);
    return *this;
  }

#pragma nv_exec_check_disable
  __device__ __host__
  function& operator=(function&& __fn)
  {
    function(internal::move(__fn)).swap(*this);
    return *this;
  }

#pragma nv_exec_check_disable
  __device__ __host__
  function& operator=(std::nullptr_t)
  {
    __destroy();
    return *this;
  }

#pragma nv_exec_check_disable
  template<class _F>
  __device__ __host__
  function&
  operator=(_F&& __fn)
  {
    static_assert(__check_callability<_F>::value,
                  "Unable to create functor object!");
    function(internal::forward<_F>(__fn)).swap(*this);
    return *this;
  }

#pragma nv_exec_check_disable
  __device__ __host__
  ~function()
  {
    __destroy();
  }

  // 20.8.11.2.2 function modifiers [func.wrap.func.mod]
#pragma nv_exec_check_disable
  __device__ __host__
  void swap(function& __fn) __NV_NOEXCEPT
  {
    internal::swap(__meta_fn, __fn.__meta_fn);
    internal::swap(__cloner, __fn.__cloner);
    internal::swap(__destructor, __fn.__destructor);

    if (__is_small_functor_data() && __fn.__is_small_functor_data()) {
      internal::swap(__small_functor_data, __fn.__small_functor_data);
    }
    else if (__is_small_functor_data()) {
      internal::swap(__small_functor_data, __fn.__small_functor_data);
      internal::swap(__obj, __fn.__obj);
      __fn.__obj = __fn.__get_small_functor_data();
    }
    else if (__fn.__is_small_functor_data()) {
      internal::swap(__small_functor_data, __fn.__small_functor_data);
      internal::swap(__obj, __fn.__obj);
      __obj = __get_small_functor_data();
    }
    else {
      internal::swap(__obj, __fn.__obj);
    }
  }

  // 20.8.11.2.3 function capacity [func.wrap.func.cap]
#pragma nv_exec_check_disable
  __device__ __host__
  explicit operator bool() const __NV_NOEXCEPT
  {
    return __obj;
  }

  // 20.8.11.2.4 function invocation [func.wrap.func.inv]
  // function::operator() can only be called in device code
  // to avoid cross-execution space calls
#pragma nv_exec_check_disable
  __device__ __host__
  _RetType operator()(_ArgTypes...) const;

};

// Out-of-line definitions
#pragma nv_exec_check_disable
template<class _RetType, class... _ArgTypes>
template<class _F>
__device__ __host__
function<_RetType(_ArgTypes...)>::function(_F __fn)
  : __obj(0), __meta_fn(0), __cloner(0), __destructor(0)
{
  static_assert(__check_callability<_F>::value,
                "Unable to construct functor object!");
  if (__is_empty_functor(__fn))
    return;
  __meta_fn = &__make_functor<_RetType, _F, _ArgTypes...>::__invoke;
  __cloner = &__make_cloner<_F>::__clone_data;
  __destructor = &__make_destructor<_F>::__destruct;

  if (__use_small_functor_data<_F>()) {
    __obj = __get_small_functor_data();
    new ((void*)__obj) _F(internal::move(__fn));
  }
  else {
    __obj = new _F(internal::move(__fn));
  }
}

#pragma nv_exec_check_disable
template <class _RetType, class... _ArgTypes>
__device__ __host__
_RetType
function<_RetType(_ArgTypes...)>::operator()(_ArgTypes... __args) const
{
  return __meta_fn(__obj, internal::forward<_ArgTypes>(__args)...);
}

// 20.8.11.2.6, Null pointer comparisons:

#pragma nv_exec_check_disable
template <class _R, class... _ArgTypes>
__device__ __host__
bool operator==(const function<_R(_ArgTypes...)>& __fn, std::nullptr_t)
  __NV_NOEXCEPT
{
  return !__fn;
}

#pragma nv_exec_check_disable
template <class _R, class... _ArgTypes>
__device__ __host__
bool operator==(std::nullptr_t, const function<_R(_ArgTypes...)>& __fn)
  __NV_NOEXCEPT
{
  return !__fn;
}

#pragma nv_exec_check_disable
template <class _R, class... _ArgTypes>
__device__ __host__
bool operator!=(const function<_R(_ArgTypes...)>& __fn, std::nullptr_t)
  __NV_NOEXCEPT
{
  return static_cast<bool>(__fn);
}

#pragma nv_exec_check_disable
template <class _R, class... _ArgTypes>
__device__ __host__
bool operator!=(std::nullptr_t, const function<_R(_ArgTypes...)>& __fn)
  __NV_NOEXCEPT
{
  return static_cast<bool>(__fn);
}

// 20.8.11.2.7, specialized algorithms:
#pragma nv_exec_check_disable
template <class _R, class... _ArgTypes>
__device__ __host__
void swap(function<_R(_ArgTypes...)>& __fn1, function<_R(_ArgTypes...)>& __fn2)
{
  __fn1.swap(__fn2);
}

} // namespace nvstd

#undef __NV_NOEXCEPT
#undef __NV_CONSTEXPR
#undef __NV_ALIGNOF

#endif // __NV_LIBCXX_FUNCTIONAL_H__

#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__)
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_NV_LIBCXX_FUNCTIONAL_H__
#endif
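In short, nvstd::function type-erases a callable behind three function pointers (invoke, clone, destroy); anything that fits _Small_functor_types lives in the inline buffer, larger callables are heap-allocated with new, and, per the comment in the class, operator() must not be called across execution spaces. A minimal sketch of device-side use through the public nvfunctional header; the kernel and names are illustrative:

#include <nvfunctional>

// Hedged sketch: a captureless device lambda wrapped in nvstd::function.
// The closure is smaller than _Small_functor_types, so it is stored in the
// inline small-functor buffer and no device-side heap allocation occurs.
__global__ void apply(const float *in, float *out, int n) {
    nvstd::function<float(float)> f = [](float x) { return x * x; };
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = f(in[i]);   // invoked on the device, where f was built
}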
miniCUDA124/include/crt/sm_70_rt.h
ADDED
@@ -0,0 +1,139 @@
/*
 * Copyright 2017-2018 NVIDIA Corporation. All rights reserved.
 *
 * NOTICE TO LICENSEE:
 *
 * This source code and/or documentation ("Licensed Deliverables") are
 * subject to NVIDIA intellectual property rights under U.S. and
 * international Copyright laws.
 *
 * These Licensed Deliverables contained herein is PROPRIETARY and
 * CONFIDENTIAL to NVIDIA and is being provided under the terms and
 * conditions of a form of NVIDIA software license agreement by and
 * between NVIDIA and Licensee ("License Agreement") or electronically
 * accepted by Licensee. Notwithstanding any terms or conditions to
 * the contrary in the License Agreement, reproduction or disclosure
 * of the Licensed Deliverables to any third party without the express
 * written consent of NVIDIA is prohibited.
 *
 * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
 * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
 * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
 * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
 * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
 * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
 * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
 * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
 * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
 * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
 * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
 * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THESE LICENSED DELIVERABLES.
 *
 * U.S. Government End Users. These Licensed Deliverables are a
 * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
 * 1995), consisting of "commercial computer software" and "commercial
 * computer software documentation" as such terms are used in 48
 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
 * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
 * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
 * U.S. Government End Users acquire the Licensed Deliverables with
 * only those rights set forth herein.
 *
 * Any use of the Licensed Deliverables in individual and commercial
 * software must include, in the user documentation and internal
 * comments to the code, the above Disclaimer and U.S. Government End
 * Users Notice.
 */

//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time)
#define EXCLUDE_FROM_RTC

#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
#if defined(_MSC_VER)
#pragma message("crt/sm_70_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
#else
#warning "crt/sm_70_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
#endif
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__
#endif

#if !defined(__SM_70_RT_H__)
#define __SM_70_RT_H__

#if defined(__CUDACC_RTC__)
#define __SM_70_RT_DECL__ __host__ __device__
#elif defined(_NVHPC_CUDA)
#define __SM_70_RT_DECL__ extern __device__ __cudart_builtin__
#else /* !__CUDACC_RTC__ */
#define __SM_70_RT_DECL__ static __device__ __inline__
#endif /* __CUDACC_RTC__ */

#if defined(__cplusplus) && defined(__CUDACC__)

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700

/*******************************************************************************
 *                                                                             *
 *                                                                             *
 *                                                                             *
 *******************************************************************************/

#include "builtin_types.h"
#include "device_types.h"
#include "host_defines.h"

#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
#define __DEF_IF_HOST { }
#else /* !__CUDA_ARCH__ */
#define __DEF_IF_HOST ;
#endif /* __CUDA_ARCH__ */


/******************************************************************************
 *                                    match                                   *
 ******************************************************************************/
__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned value) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, int value) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long value) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long value) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long long value) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long long value) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, float value) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, double value) __DEF_IF_HOST

__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned value, int *pred) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, int value, int *pred) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long value, int *pred) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long value, int *pred) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long long value, int *pred) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long long value, int *pred) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, float value, int *pred) __DEF_IF_HOST
__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, double value, int *pred) __DEF_IF_HOST

__SM_70_RT_DECL__ void __nanosleep(unsigned int ns) __DEF_IF_HOST

__SM_70_RT_DECL__ unsigned short int atomicCAS(unsigned short int *address, unsigned short int compare, unsigned short int val) __DEF_IF_HOST

#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */

#endif /* __cplusplus && __CUDACC__ */

#undef __DEF_IF_HOST
#undef __SM_70_RT_DECL__

#if (!defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)) || defined(_NVHPC_CUDA)
#include "sm_70_rt.hpp"
#endif /* (!defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)) || defined(_NVHPC_CUDA) */

#endif /* !__SM_70_RT_H__ */

#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__)
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_H__
#endif


#undef EXCLUDE_FROM_RTC
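For each calling lane, __match_any_sync(mask, value) returns the subset of lanes in mask whose value compares equal, and __match_all_sync additionally reports through *pred whether every lane matched; __DEF_IF_HOST gives the declarations empty bodies in host compilation passes. A hedged sketch of the classic use, partitioning a warp by key (requires compute capability 7.0+ and a blockDim.x that is a multiple of 32; the kernel and names are illustrative):

// Hedged sketch: group the 32 lanes of a warp by equal key and elect the
// lowest lane of each group as its leader.
__global__ void elect_leaders(const int *keys, int *is_leader) {
    int tid  = blockIdx.x * blockDim.x + threadIdx.x;
    int lane = threadIdx.x & 31;
    unsigned peers = __match_any_sync(0xffffffffu, keys[tid]); // lanes sharing this key
    is_leader[tid] = (lane == __ffs(peers) - 1);               // lowest matching lane
}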
miniCUDA124/include/crt/sm_70_rt.hpp
ADDED
@@ -0,0 +1,192 @@
/*
 * Copyright 2017-2021 NVIDIA Corporation. All rights reserved.
 *
 * [Standard NVIDIA "NOTICE TO LICENSEE" header, identical to the one
 *  reproduced in full in crt/sm_70_rt.h above.]
 */

#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
#if defined(_MSC_VER)
#pragma message("crt/sm_70_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
#else
#warning "crt/sm_70_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
#endif
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__
#endif

#if !defined(__SM_70_RT_HPP__)
#define __SM_70_RT_HPP__

#if defined(__CUDACC_RTC__)
#define __SM_70_RT_DECL__ __host__ __device__
#else /* !__CUDACC_RTC__ */
#define __SM_70_RT_DECL__ static __device__ __inline__
#endif /* __CUDACC_RTC__ */

#if defined(__cplusplus) && defined(__CUDACC__)

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 700

/*******************************************************************************
 *                                                                             *
 *                                                                             *
 *                                                                             *
 *******************************************************************************/

#include "builtin_types.h"
#include "device_types.h"
#include "host_defines.h"

/*******************************************************************************
 *                                                                             *
 * Below are implementations of SM-7.0 builtin functions which are included as *
 * source (instead of being built in to the compiler)                          *
 *                                                                             *
 *******************************************************************************/

//
// __match_any_sync
//
__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned value) {
  return __match32_any_sync(mask, value);
}

__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, int value) {
  return __match32_any_sync(mask, value);
}

__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long value) {
  return (sizeof(long) == sizeof(long long)) ?
      __match64_any_sync(mask, (unsigned long long)value) :
      __match32_any_sync(mask, (unsigned)value);
}

__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long value) {
  return (sizeof(long) == sizeof(long long)) ?
      __match64_any_sync(mask, (unsigned long long)value) :
      __match32_any_sync(mask, (unsigned)value);
}

__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, unsigned long long value) {
  return __match64_any_sync(mask, value);
}

__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, long long value) {
  return __match64_any_sync(mask, value);
}

__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, float value) {
  return __match32_any_sync(mask, __float_as_uint(value));
}

__SM_70_RT_DECL__ unsigned int __match_any_sync(unsigned mask, double value) {
  return __match64_any_sync(mask, __double_as_longlong(value));
}

//
// __match_all_sync
//
__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned value, int *pred) {
  return __match32_all_sync(mask, value, pred);
}

__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, int value, int *pred) {
  return __match32_all_sync(mask, value, pred);
}

__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long value, int *pred) {
  return (sizeof(long) == sizeof(long long)) ?
      __match64_all_sync(mask, (unsigned long long)value, pred) :
      __match32_all_sync(mask, (unsigned)value, pred);
}

__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long value, int *pred) {
  return (sizeof(long) == sizeof(long long)) ?
      __match64_all_sync(mask, (unsigned long long)value, pred) :
      __match32_all_sync(mask, (unsigned)value, pred);
}

__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, unsigned long long value, int *pred) {
  return __match64_all_sync(mask, value, pred);
}

__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, long long value, int *pred) {
  return __match64_all_sync(mask, value, pred);
}

__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, float value, int *pred) {
  return __match32_all_sync(mask, __float_as_uint(value), pred);
}

__SM_70_RT_DECL__ unsigned int __match_all_sync(unsigned mask, double value, int *pred) {
  return __match64_all_sync(mask, __double_as_longlong(value), pred);
}

__SM_70_RT_DECL__ void __nanosleep(unsigned int ns) {
  asm volatile("nanosleep.u32 %0;" :: "r"(ns));
}


extern "C" __device__ __device_builtin__
unsigned short __usAtomicCAS(unsigned short *, unsigned short, unsigned short);

__SM_70_RT_DECL__ unsigned short int atomicCAS(unsigned short int *address, unsigned short int compare, unsigned short int val) {
  return __usAtomicCAS(address, compare, val);
}


#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 700 */

#endif /* __cplusplus && __CUDACC__ */

#undef __SM_70_RT_DECL__

#endif /* !__SM_70_RT_HPP__ */

#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__)
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_70_RT_HPP__
#endif
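This file shows the layering behind the sm_70_rt.h declarations: 32-bit and 64-bit types route to the __match32/__match64 builtins, floats and doubles are matched by bit pattern via __float_as_uint and __double_as_longlong, __nanosleep lowers to a single nanosleep.u32 instruction, and the new 16-bit atomicCAS forwards to __usAtomicCAS. The standard compare-and-swap retry loop therefore works on unsigned short from sm_70 on; a hedged sketch (the helper name is illustrative, not from the header):

// Hedged sketch: atomic max on unsigned short built from the 16-bit
// atomicCAS overload defined above (sm_70+ only).
__device__ unsigned short atomicMaxU16(unsigned short *addr, unsigned short val) {
    unsigned short old = *addr;
    while (old < val) {
        unsigned short assumed = old;
        old = atomicCAS(addr, assumed, val);  // resolves to __usAtomicCAS
        if (old == assumed) break;            // swap succeeded
    }
    return old;
}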
miniCUDA124/include/crt/sm_80_rt.h
ADDED
@@ -0,0 +1,164 @@
/*
 * Copyright 2017-2021 NVIDIA Corporation. All rights reserved.
 *
 * NOTICE TO LICENSEE:
 *
 * This source code and/or documentation ("Licensed Deliverables") are
 * subject to NVIDIA intellectual property rights under U.S. and
 * international Copyright laws.
 *
 * These Licensed Deliverables contained herein is PROPRIETARY and
 * CONFIDENTIAL to NVIDIA and is being provided under the terms and
 * conditions of a form of NVIDIA software license agreement by and
 * between NVIDIA and Licensee ("License Agreement") or electronically
 * accepted by Licensee. Notwithstanding any terms or conditions to
 * the contrary in the License Agreement, reproduction or disclosure
 * of the Licensed Deliverables to any third party without the express
 * written consent of NVIDIA is prohibited.
 *
 * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
 * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
 * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
 * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
 * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
 * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
 * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
 * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
 * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
 * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
 * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
 * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THESE LICENSED DELIVERABLES.
 *
 * U.S. Government End Users. These Licensed Deliverables are a
 * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
 * 1995), consisting of "commercial computer software" and "commercial
 * computer software documentation" as such terms are used in 48
 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
 * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
 * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
 * U.S. Government End Users acquire the Licensed Deliverables with
 * only those rights set forth herein.
 *
 * Any use of the Licensed Deliverables in individual and commercial
 * software must include, in the user documentation and internal
 * comments to the code, the above Disclaimer and U.S. Government End
 * Users Notice.
 */

#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
#if defined(_MSC_VER)
#pragma message("crt/sm_80_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
#else
#warning "crt/sm_80_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
#endif
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__
#endif

#if !defined(__SM_80_RT_H__)
#define __SM_80_RT_H__

#if defined(__CUDACC_RTC__)
#define __SM_80_RT_DECL__ __host__ __device__
#elif defined(_NVHPC_CUDA)
#define __SM_80_RT_DECL__ extern __device__ __cudart_builtin__
#else /* !__CUDACC_RTC__ */
#define __SM_80_RT_DECL__ static __device__ __inline__
#endif /* __CUDACC_RTC__ */

#if defined(__cplusplus) && defined(__CUDACC__)

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800

/*******************************************************************************
*                                                                              *
*                                                                              *
*                                                                              *
*******************************************************************************/

#include "builtin_types.h"
#include "device_types.h"
#include "host_defines.h"

#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
#define __DEF_IF_HOST { }
#else /* !__CUDA_ARCH__ */
#define __DEF_IF_HOST ;
#endif /* __CUDA_ARCH__ */


//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time)
#define EXCLUDE_FROM_RTC
/******************************************************************************
 *                                   reduce                                   *
 ******************************************************************************/
__SM_80_RT_DECL__ unsigned __reduce_add_sync(unsigned mask, unsigned value) __DEF_IF_HOST
__SM_80_RT_DECL__ unsigned __reduce_min_sync(unsigned mask, unsigned value) __DEF_IF_HOST
__SM_80_RT_DECL__ unsigned __reduce_max_sync(unsigned mask, unsigned value) __DEF_IF_HOST

__SM_80_RT_DECL__ int __reduce_add_sync(unsigned mask, int value) __DEF_IF_HOST
__SM_80_RT_DECL__ int __reduce_min_sync(unsigned mask, int value) __DEF_IF_HOST
__SM_80_RT_DECL__ int __reduce_max_sync(unsigned mask, int value) __DEF_IF_HOST

__SM_80_RT_DECL__ unsigned __reduce_and_sync(unsigned mask, unsigned value) __DEF_IF_HOST
__SM_80_RT_DECL__ unsigned __reduce_or_sync(unsigned mask, unsigned value) __DEF_IF_HOST
__SM_80_RT_DECL__ unsigned __reduce_xor_sync(unsigned mask, unsigned value) __DEF_IF_HOST

#undef EXCLUDE_FROM_RTC

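A minimal usage sketch for the reduce declarations above (sm_80+ assumed; kernel and variable names are illustrative): each lane contributes one value, and every participating lane gets the full warp-wide result back in a single hardware instruction.

__global__ void warp_sum(const int *in, int *out) {
    int v = in[threadIdx.x];
    /* All 32 lanes participate; every lane receives the 32-element sum. */
    int total = __reduce_add_sync(0xffffffffu, v);
    if (threadIdx.x == 0) *out = total;
}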
extern "C" {
inline __device__ void *__nv_associate_access_property(const void *ptr,
                                                       unsigned long long property) {
    extern __device__ void *__nv_associate_access_property_impl(const void *,
                                                                unsigned long long);
    return __nv_associate_access_property_impl(ptr, property);
}

inline __device__ void __nv_memcpy_async_shared_global_4(void *dst,
                                                         const void *src,
                                                         unsigned src_size) {
    extern __device__ void __nv_memcpy_async_shared_global_4_impl(void *,
                                                                  const void *,
                                                                  unsigned);
    __nv_memcpy_async_shared_global_4_impl(dst, src, src_size);
}

inline __device__ void __nv_memcpy_async_shared_global_8(void *dst,
                                                         const void *src,
                                                         unsigned src_size) {
    extern __device__ void __nv_memcpy_async_shared_global_8_impl(void *,
                                                                  const void *,
                                                                  unsigned);
    __nv_memcpy_async_shared_global_8_impl(dst, src, src_size);
}

inline __device__ void __nv_memcpy_async_shared_global_16(void *dst,
                                                          const void *src,
                                                          unsigned src_size) {
    extern __device__ void __nv_memcpy_async_shared_global_16_impl(void *,
                                                                   const void *,
                                                                   unsigned);
    __nv_memcpy_async_shared_global_16_impl(dst, src, src_size);
}

}
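The __nv_memcpy_async_shared_global_{4,8,16} hooks above are low-level entry points for asynchronous global-to-shared copies; user code normally reaches this machinery through higher-level front ends. A hedged sketch of the typical cooperative-groups usage (tile size and kernel name are illustrative; on sm_80+ such copies can lower to cp.async via helpers of this kind):

#include <cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
namespace cg = cooperative_groups;

__global__ void stage_tile(const float *global_in, float *out) {
    __shared__ float tile[256];
    cg::thread_block block = cg::this_thread_block();
    /* Kick off the block-wide asynchronous copy, then wait for arrival. */
    cg::memcpy_async(block, tile, global_in, sizeof(tile));
    cg::wait(block);
    out[threadIdx.x] = tile[threadIdx.x] * 2.0f;
}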
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 800 */

#endif /* __cplusplus && __CUDACC__ */

#undef __DEF_IF_HOST
#undef __SM_80_RT_DECL__

#if (!defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)) || defined(_NVHPC_CUDA)
#include "sm_80_rt.hpp"
#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */

#endif /* !__SM_80_RT_H__ */

#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__)
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_H__
#endif
miniCUDA124/include/crt/sm_80_rt.hpp
ADDED
@@ -0,0 +1,148 @@
/*
 * Copyright 2017-2021 NVIDIA Corporation. All rights reserved.
 *
 * NOTICE TO LICENSEE: (same NVIDIA proprietary-license notice as in
 * crt/sm_80_rt.h above)
 */

#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
#if defined(_MSC_VER)
#pragma message("crt/sm_80_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
#else
#warning "crt/sm_80_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
#endif
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__
#endif

#if !defined(__SM_80_RT_HPP__)
#define __SM_80_RT_HPP__

#if defined(__CUDACC_RTC__)
#define __SM_80_RT_DECL__ __host__ __device__
#else /* !__CUDACC_RTC__ */
#define __SM_80_RT_DECL__ static __device__ __inline__
#endif /* __CUDACC_RTC__ */

#if defined(__cplusplus) && defined(__CUDACC__)

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800

/*******************************************************************************
*                                                                              *
*                                                                              *
*                                                                              *
*******************************************************************************/

#include "builtin_types.h"
#include "device_types.h"
#include "host_defines.h"

/*******************************************************************************
*                                                                              *
* Below are implementations of SM-8.0 builtin functions which are included as *
* source (instead of being built in to the compiler)                           *
*                                                                              *
*******************************************************************************/

extern "C" {
__device_builtin__ __device__ unsigned __reduce_add_sync_unsigned_impl(unsigned, unsigned);
__device_builtin__ __device__ unsigned __reduce_min_sync_unsigned_impl(unsigned, unsigned);
__device_builtin__ __device__ unsigned __reduce_max_sync_unsigned_impl(unsigned, unsigned);
__device_builtin__ __device__ int __reduce_add_sync_signed_impl(unsigned, int);
__device_builtin__ __device__ int __reduce_min_sync_signed_impl(unsigned, int);
__device_builtin__ __device__ int __reduce_max_sync_signed_impl(unsigned, int);
__device_builtin__ __device__ unsigned __reduce_or_sync_unsigned_impl(unsigned, unsigned);
__device_builtin__ __device__ unsigned __reduce_and_sync_unsigned_impl(unsigned, unsigned);
__device_builtin__ __device__ unsigned __reduce_xor_sync_unsigned_impl(unsigned, unsigned);
}

__SM_80_RT_DECL__ unsigned __reduce_add_sync(unsigned mask, unsigned value) {
    return __reduce_add_sync_unsigned_impl(mask, value);
}

__SM_80_RT_DECL__ unsigned __reduce_min_sync(unsigned mask, unsigned value) {
    return __reduce_min_sync_unsigned_impl(mask, value);
}

__SM_80_RT_DECL__ unsigned __reduce_max_sync(unsigned mask, unsigned value) {
    return __reduce_max_sync_unsigned_impl(mask, value);
}

__SM_80_RT_DECL__ int __reduce_add_sync(unsigned mask, int value) {
    return __reduce_add_sync_signed_impl(mask, value);
}

__SM_80_RT_DECL__ int __reduce_min_sync(unsigned mask, int value) {
    return __reduce_min_sync_signed_impl(mask, value);
}

__SM_80_RT_DECL__ int __reduce_max_sync(unsigned mask, int value) {
    return __reduce_max_sync_signed_impl(mask, value);
}

__SM_80_RT_DECL__ unsigned __reduce_and_sync(unsigned mask, unsigned value) {
    return __reduce_and_sync_unsigned_impl(mask, value);
}

__SM_80_RT_DECL__ unsigned __reduce_or_sync(unsigned mask, unsigned value) {
    return __reduce_or_sync_unsigned_impl(mask, value);
}

__SM_80_RT_DECL__ unsigned __reduce_xor_sync(unsigned mask, unsigned value) {
    return __reduce_xor_sync_unsigned_impl(mask, value);
}
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 800 */

#endif /* __cplusplus && __CUDACC__ */

#undef __SM_80_RT_DECL__

#endif /* !__SM_80_RT_HPP__ */

#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__)
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_80_RT_HPP__
#endif
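On targets older than sm_80, where the __reduce_* builtins implemented here do not exist, the conventional substitute is a butterfly shuffle; a hedged equivalence sketch (helper name invented):

/* Portable warp sum for pre-sm_80 devices: five __shfl_xor_sync steps
   compute what __reduce_add_sync(0xffffffffu, v) returns in a single
   instruction on sm_80 and newer. */
__device__ int warp_reduce_add_fallback(int v) {
    for (int ofs = 16; ofs > 0; ofs >>= 1)
        v += __shfl_xor_sync(0xffffffffu, v, ofs);
    return v; /* every lane now holds the sum of all 32 lanes */
}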
miniCUDA124/include/crt/sm_90_rt.h
ADDED
@@ -0,0 +1,282 @@
/*
 * Copyright 2022-2023 NVIDIA Corporation. All rights reserved.
 *
 * NOTICE TO LICENSEE: (same NVIDIA proprietary-license notice as in
 * crt/sm_80_rt.h above)
 */

#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
#if defined(_MSC_VER)
#pragma message("crt/sm_90_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
#else
#warning "crt/sm_90_rt.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
#endif
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__
#endif

#if !defined(__SM_90_RT_H__)
#define __SM_90_RT_H__

#if defined(__CUDACC_RTC__)
#define __SM_90_RT_DECL__ __host__ __device__
#else /* !__CUDACC_RTC__ */
#define __SM_90_RT_DECL__ static __device__ __inline__
#endif /* __CUDACC_RTC__ */

#if defined(__cplusplus) && defined(__CUDACC__)

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900

/*******************************************************************************
*                                                                              *
*                                                                              *
*                                                                              *
*******************************************************************************/

#include "builtin_types.h"
#include "device_types.h"
#include "host_defines.h"

#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA)
#define __DEF_IF_HOST { }
#else /* !__CUDA_ARCH__ && !_NVHPC_CUDA */
#define __DEF_IF_HOST ;
#endif /* __CUDA_ARCH__ || _NVHPC_CUDA */

//NOTE: For NVRTC, these declarations have been moved into the compiler (to reduce compile time)
#define EXCLUDE_FROM_RTC

__SM_90_RT_DECL__ unsigned __isCtaShared(const void *ptr) __DEF_IF_HOST
__SM_90_RT_DECL__ unsigned __isClusterShared(const void *ptr) __DEF_IF_HOST
__SM_90_RT_DECL__ void *__cluster_map_shared_rank(const void *ptr, unsigned target_block_rank) __DEF_IF_HOST
__SM_90_RT_DECL__ unsigned __cluster_query_shared_rank(const void *ptr) __DEF_IF_HOST
__SM_90_RT_DECL__ uint2 __cluster_map_shared_multicast(const void *ptr, unsigned cluster_cta_mask) __DEF_IF_HOST
__SM_90_RT_DECL__ unsigned __clusterDimIsSpecified() __DEF_IF_HOST
__SM_90_RT_DECL__ dim3 __clusterDim() __DEF_IF_HOST
__SM_90_RT_DECL__ dim3 __clusterRelativeBlockIdx() __DEF_IF_HOST
__SM_90_RT_DECL__ dim3 __clusterGridDimInClusters() __DEF_IF_HOST
__SM_90_RT_DECL__ dim3 __clusterIdx() __DEF_IF_HOST
__SM_90_RT_DECL__ unsigned __clusterRelativeBlockRank() __DEF_IF_HOST
__SM_90_RT_DECL__ unsigned __clusterSizeInBlocks() __DEF_IF_HOST
__SM_90_RT_DECL__ void __cluster_barrier_arrive() __DEF_IF_HOST
__SM_90_RT_DECL__ void __cluster_barrier_arrive_relaxed() __DEF_IF_HOST
__SM_90_RT_DECL__ void __cluster_barrier_wait() __DEF_IF_HOST
__SM_90_RT_DECL__ void __threadfence_cluster() __DEF_IF_HOST

__SM_90_RT_DECL__ float2 atomicAdd(float2 *__address, float2 val) __DEF_IF_HOST
__SM_90_RT_DECL__ float2 atomicAdd_block(float2 *__address, float2 val) __DEF_IF_HOST
__SM_90_RT_DECL__ float2 atomicAdd_system(float2 *__address, float2 val) __DEF_IF_HOST
__SM_90_RT_DECL__ float4 atomicAdd(float4 *__address, float4 val) __DEF_IF_HOST
__SM_90_RT_DECL__ float4 atomicAdd_block(float4 *__address, float4 val) __DEF_IF_HOST
__SM_90_RT_DECL__ float4 atomicAdd_system(float4 *__address, float4 val) __DEF_IF_HOST

#undef EXCLUDE_FROM_RTC

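A usage sketch for the cluster intrinsics declared above (sm_90 only; assumes a 2-block cluster requested with the __cluster_dims__ attribute, all names invented): one block publishes a value in its shared memory, the cluster synchronizes, and the peer block reads it through distributed shared memory.

__global__ void __cluster_dims__(2, 1, 1) cluster_exchange(int *out) {
    __shared__ int stage;
    if (threadIdx.x == 0) stage = (int)__clusterRelativeBlockRank();
    /* arrive + wait is the split form of a full cluster barrier. */
    __cluster_barrier_arrive();
    __cluster_barrier_wait();
    unsigned peer = 1u - __clusterRelativeBlockRank();
    /* Map the peer block's "stage" into this block's address space. */
    int *remote = (int *)__cluster_map_shared_rank(&stage, peer);
    out[blockIdx.x * blockDim.x + threadIdx.x] = *remote;
    /* Barrier again so no block exits while a peer may still be reading
       its shared memory. */
    __cluster_barrier_arrive();
    __cluster_barrier_wait();
}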
//Note: below atomic functions are templates, so cannot be represented in NVRTC
//builtins representation, so they have to be parsed on every NVRTC compilation.
//(notice 'EXCLUDE_FROM_RTC' ends above)


#ifndef __NV_DISABLE_128_ATOMICS
// lgen definitions for 128b atomics
extern "C" {
__device__ __device_builtin__ void __u128AtomicCAS(void *, void *, void *, void *);
__device__ __device_builtin__ void __u128AtomicCAS_block(void *, void *, void *, void *);
__device__ __device_builtin__ void __u128AtomicCAS_system(void *, void *, void *, void *);
__device__ __device_builtin__ void __u128AtomicExch(void *, void *, void *);
__device__ __device_builtin__ void __u128AtomicExch_block(void *, void *, void *);
__device__ __device_builtin__ void __u128AtomicExch_system(void *, void *, void *);
}

// macro to get address of object, to workaround situations where the type overloads the "&" operator
#define __NV_ATOMIC_ADDRESSOF(__val) \
  (void *)(&(const_cast<char &>(reinterpret_cast<const volatile char &>(__val))))

// enable_if
template<bool __b, typename _T>
struct __nv_atomic_enable_if { };

template<typename _T>
struct __nv_atomic_enable_if<true, _T> { typedef _T __type; };

// alignof
#if defined(__CUDACC_RTC__)
#define __NV_ATOMIC_ALIGNOF __alignof__
#else
#define __NV_ATOMIC_ALIGNOF __alignof
#endif

// trivially copyable
template <typename _T>
struct __nv_atomic_triv_cp_helper {
#if defined(__GNUC__)
#if (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 3)
  static const bool __val = true;
#elif (__GNUC__ < 5)
  static const bool __val = __has_trivial_copy(_T);
#else
  static const bool __val = __is_trivially_copyable(_T);
#endif
#else
  static const bool __val = __is_trivially_copyable(_T);
#endif
};
#define __NV_ATOMIC_TRIVIALLY_COPYABLE(_T) \
  __nv_atomic_triv_cp_helper<_T>::__val

// return type
#if __cplusplus >= 202002L // C++20 or greater
#define __NV_ATOMIC_RET_TYPE(_T) _T
#else
#define __NV_ATOMIC_RET_TYPE(_T) typename \
  __nv_atomic_enable_if<sizeof(_T) == 16 && \
                        __NV_ATOMIC_ALIGNOF(_T) >= 16 && \
                        __NV_ATOMIC_TRIVIALLY_COPYABLE(_T), _T>::__type
#endif

// requires
#if __cplusplus >= 202002L // C++20 or greater
#define __NV_ATOMIC_REQUIRES(_T) \
  requires(sizeof(_T) == 16 && \
           __NV_ATOMIC_ALIGNOF(_T) >= 16 && \
           __NV_ATOMIC_TRIVIALLY_COPYABLE(_T))
#else
#define __NV_ATOMIC_REQUIRES(_T)
#endif

// temp value and return value
#if __cplusplus >= 201103L || defined(_MSC_VER) // C++11 or greater, or MSC
#define __NV_ATOMIC_TEMP(_T) union _U \
  {_T __ret; __device__ __inline__ _U() {}}; _U __u
#define __NV_ATOMIC_RET(_T) __u.__ret
#else
#define __NV_ATOMIC_TEMP(_T) _T __ret
#define __NV_ATOMIC_RET(_T) __ret
#endif

// templated 128-bit atomics
template <typename _T>
__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
atomicCAS(_T *__address, _T __compare, _T __val) __NV_ATOMIC_REQUIRES(_T) {
  __NV_ATOMIC_TEMP(_T);
  __u128AtomicCAS((void *)(__address),
                  __NV_ATOMIC_ADDRESSOF(__compare),
                  __NV_ATOMIC_ADDRESSOF(__val),
                  __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
  return __NV_ATOMIC_RET(_T);
}

template <typename _T>
__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
atomicCAS_block(_T *__address, _T __compare, _T __val) __NV_ATOMIC_REQUIRES(_T) {
  __NV_ATOMIC_TEMP(_T);
  __u128AtomicCAS_block((void *)(__address),
                        __NV_ATOMIC_ADDRESSOF(__compare),
                        __NV_ATOMIC_ADDRESSOF(__val),
                        __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
  return __NV_ATOMIC_RET(_T);
}

template <typename _T>
__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
atomicCAS_system(_T *__address, _T __compare, _T __val) __NV_ATOMIC_REQUIRES(_T) {
  __NV_ATOMIC_TEMP(_T);
  __u128AtomicCAS_system((void *)(__address),
                         __NV_ATOMIC_ADDRESSOF(__compare),
                         __NV_ATOMIC_ADDRESSOF(__val),
                         __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
  return __NV_ATOMIC_RET(_T);
}

template <typename _T>
__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
atomicExch(_T *__address, _T __val) __NV_ATOMIC_REQUIRES(_T) {
  __NV_ATOMIC_TEMP(_T);
  __u128AtomicExch((void *)(__address),
                   __NV_ATOMIC_ADDRESSOF(__val),
                   __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
  return __NV_ATOMIC_RET(_T);
}

template <typename _T>
__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
atomicExch_block(_T *__address, _T __val) __NV_ATOMIC_REQUIRES(_T) {
  __NV_ATOMIC_TEMP(_T);
  __u128AtomicExch_block((void *)(__address),
                         __NV_ATOMIC_ADDRESSOF(__val),
                         __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
  return __NV_ATOMIC_RET(_T);
}

template <typename _T>
__SM_90_RT_DECL__ __NV_ATOMIC_RET_TYPE(_T)
atomicExch_system(_T *__address, _T __val) __NV_ATOMIC_REQUIRES(_T) {
  __NV_ATOMIC_TEMP(_T);
  __u128AtomicExch_system((void *)(__address),
                          __NV_ATOMIC_ADDRESSOF(__val),
                          __NV_ATOMIC_ADDRESSOF(__NV_ATOMIC_RET(_T)));
  return __NV_ATOMIC_RET(_T);
}
#endif /* !__NV_DISABLE_128_ATOMICS */

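As a usage sketch for the templated 128-bit atomics above: any 16-byte, 16-byte-aligned, trivially copyable type satisfies the constraints that __NV_ATOMIC_RET_TYPE / __NV_ATOMIC_REQUIRES encode (SFINAE before C++20, a requires-clause after). The struct and CAS loop below are illustrative, sm_90 only:

struct __align__(16) Pair128 { long long lo, hi; };

__global__ void bump_lo(Pair128 *slot) {
    /* Classic CAS retry loop, but on a full 16-byte value: reread, build
       the desired value, swap until the compare succeeds. The initial
       read is ordinary (non-atomic), which is fine for a retry loop. */
    Pair128 expected = *slot;
    for (;;) {
        Pair128 desired = { expected.lo + 1, expected.hi };
        Pair128 prev = atomicCAS(slot, expected, desired);
        if (prev.lo == expected.lo && prev.hi == expected.hi) break;
        expected = prev;
    }
}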
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 900 */

#endif /* __cplusplus && __CUDACC__ */

#undef __DEF_IF_HOST
#undef __SM_90_RT_DECL__

#if (!defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)) || defined(_NVHPC_CUDA)
#include "sm_90_rt.hpp"
#endif /* (!defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)) || defined(_NVHPC_CUDA) */

#endif /* !__SM_90_RT_H__ */

#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__)
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_H__
#endif
miniCUDA124/include/crt/sm_90_rt.hpp
ADDED
@@ -0,0 +1,248 @@
/*
 * Copyright 2022 NVIDIA Corporation. All rights reserved.
 *
 * NOTICE TO LICENSEE: (same NVIDIA proprietary-license notice as in
 * crt/sm_80_rt.h above)
 */

#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
#if defined(_MSC_VER)
#pragma message("crt/sm_90_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
#else
#warning "crt/sm_90_rt.hpp is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
#endif
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__
#endif

#if !defined(__SM_90_RT_HPP__)
#define __SM_90_RT_HPP__

#if defined(__CUDACC_RTC__)
#define __SM_90_RT_DECL__ __host__ __device__
#else /* !__CUDACC_RTC__ */
#define __SM_90_RT_DECL__ static __device__ __inline__
#endif /* __CUDACC_RTC__ */

#if defined(__cplusplus) && defined(__CUDACC__)

#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900

/*******************************************************************************
*                                                                              *
*                                                                              *
*                                                                              *
*******************************************************************************/

#include "builtin_types.h"
#include "device_types.h"
#include "host_defines.h"

/*******************************************************************************
*                                                                              *
* Below are implementations of SM-9.0 builtin functions which are included as *
* source (instead of being built in to the compiler)                           *
*                                                                              *
*******************************************************************************/
extern "C" {
__device__ unsigned __nv_isClusterShared_impl(const void *);
__device__ void * __nv_cluster_map_shared_rank_impl(const void *, unsigned);
__device__ unsigned __nv_cluster_query_shared_rank_impl(const void *);
__device__ unsigned __nv_clusterDimIsSpecifed_impl();
__device__ void __nv_clusterDim_impl(unsigned *, unsigned *, unsigned *);
__device__ void __nv_clusterRelativeBlockIdx_impl(unsigned *,
                                                  unsigned *, unsigned *);
__device__ void __nv_clusterGridDimInClusters_impl(unsigned *,
                                                   unsigned *, unsigned *);
__device__ void __nv_clusterIdx_impl(unsigned *, unsigned *, unsigned *);
__device__ unsigned __nv_clusterRelativeBlockRank_impl();
__device__ unsigned __nv_clusterSizeInBlocks_impl();
__device__ void __nv_cluster_barrier_arrive_impl();
__device__ void __nv_cluster_barrier_arrive_relaxed_impl();
__device__ void __nv_cluster_barrier_wait_impl();
__device__ void __nv_threadfence_cluster_impl();

__device__ __device_builtin__ float2 __f2AtomicAdd(float2 *, float2);
__device__ __device_builtin__ float2 __f2AtomicAdd_block(float2 *, float2);
__device__ __device_builtin__ float2 __f2AtomicAdd_system(float2 *, float2);
__device__ __device_builtin__ float4 __f4AtomicAdd(float4 *, float4);
__device__ __device_builtin__ float4 __f4AtomicAdd_block(float4 *, float4);
__device__ __device_builtin__ float4 __f4AtomicAdd_system(float4 *, float4);
} // extern "C"

__SM_90_RT_DECL__ unsigned __isCtaShared(const void *ptr)
{
    return __isShared(ptr);
}

__SM_90_RT_DECL__ unsigned __isClusterShared(const void *ptr)
{
    return __nv_isClusterShared_impl(ptr);
}

__SM_90_RT_DECL__ void *__cluster_map_shared_rank(const void *ptr,
                                                  unsigned target_block_rank)
{
    return __nv_cluster_map_shared_rank_impl(ptr, target_block_rank);
}

__SM_90_RT_DECL__ unsigned __cluster_query_shared_rank(const void *ptr)
{
    return __nv_cluster_query_shared_rank_impl(ptr);
}

__SM_90_RT_DECL__ uint2 __cluster_map_shared_multicast(const void *ptr,
                                                       unsigned int cluster_cta_mask)
{
    return make_uint2((unsigned)__cvta_generic_to_shared(ptr), cluster_cta_mask);
}

__SM_90_RT_DECL__ unsigned __clusterDimIsSpecified()
{
    return __nv_clusterDimIsSpecifed_impl();
}

__SM_90_RT_DECL__ dim3 __clusterDim()
{
    unsigned x, y, z;
    __nv_clusterDim_impl(&x, &y, &z);
    return dim3(x, y, z);
}

__SM_90_RT_DECL__ dim3 __clusterRelativeBlockIdx()
{
    unsigned x, y, z;
    __nv_clusterRelativeBlockIdx_impl(&x, &y, &z);
    return dim3(x, y, z);
}

__SM_90_RT_DECL__ dim3 __clusterGridDimInClusters()
{
    unsigned x, y, z;
    __nv_clusterGridDimInClusters_impl(&x, &y, &z);
    return dim3(x, y, z);
}

__SM_90_RT_DECL__ dim3 __clusterIdx()
{
    unsigned x, y, z;
    __nv_clusterIdx_impl(&x, &y, &z);
    return dim3(x, y, z);
}

__SM_90_RT_DECL__ unsigned __clusterRelativeBlockRank()
{
    return __nv_clusterRelativeBlockRank_impl();
}

__SM_90_RT_DECL__ unsigned __clusterSizeInBlocks()
{
    return __nv_clusterSizeInBlocks_impl();
}

__SM_90_RT_DECL__ void __cluster_barrier_arrive()
{
    __nv_cluster_barrier_arrive_impl();
}

__SM_90_RT_DECL__ void __cluster_barrier_arrive_relaxed()
{
    __nv_cluster_barrier_arrive_relaxed_impl();
}

__SM_90_RT_DECL__ void __cluster_barrier_wait()
{
    __nv_cluster_barrier_wait_impl();
}

__SM_90_RT_DECL__ void __threadfence_cluster()
{
    __nv_threadfence_cluster_impl();
}


/* Define __PTR for atomicAdd prototypes below, undef after done */
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
#define __PTR "l"
#else
#define __PTR "r"
#endif /*(defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)*/

__SM_90_RT_DECL__ float2 atomicAdd(float2 *address, float2 val) {
    return __f2AtomicAdd(address, val);
}

__SM_90_RT_DECL__ float2 atomicAdd_block(float2 *address, float2 val) {
    return __f2AtomicAdd_block(address, val);
}

__SM_90_RT_DECL__ float2 atomicAdd_system(float2 *address, float2 val) {
    return __f2AtomicAdd_system(address, val);
}

__SM_90_RT_DECL__ float4 atomicAdd(float4 *address, float4 val) {
    return __f4AtomicAdd(address, val);
}

__SM_90_RT_DECL__ float4 atomicAdd_block(float4 *address, float4 val) {
    return __f4AtomicAdd_block(address, val);
}

__SM_90_RT_DECL__ float4 atomicAdd_system(float4 *address, float4 val) {
    return __f4AtomicAdd_system(address, val);
}

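Usage sketch for the vector atomicAdd overloads above (sm_90 only; accumulator layout and names are illustrative): the whole float2 is added as one atomic update, which suits complex-number or (x, y) accumulators.

__global__ void accumulate_xy(const float2 *samples, float2 *acc, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        /* One vector atomic instead of two independent float atomics. */
        atomicAdd(acc, samples[i]);
    }
}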
#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 900 */

#endif /* __cplusplus && __CUDACC__ */

#undef __SM_90_RT_DECL__

#endif /* !__SM_90_RT_HPP__ */

#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__)
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_SM_90_RT_HPP__
#endif
miniCUDA124/include/crt/storage_class.h
ADDED
@@ -0,0 +1,142 @@
/*
 * NVIDIA_COPYRIGHT_BEGIN
 *
 * Copyright (c) 2008-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 *
 * NVIDIA_COPYRIGHT_END
 */

#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
#if defined(_MSC_VER)
#pragma message("crt/storage_class.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
#else
#warning "crt/storage_class.h is an internal header file and must not be used directly. Please use cuda_runtime_api.h or cuda_runtime.h instead."
#endif
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__
#endif

#if !defined(__STORAGE_CLASS_H__)
#define __STORAGE_CLASS_H__

#if !defined(__var_used__)

#define __var_used__

#endif /* __var_used__ */

#if !defined(__loc_sc__)

#define __loc_sc__(loc, size, sc) \
        __storage##_##sc##size##loc loc

#endif /* !__loc_sc__ */

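A worked expansion of the token-pasting scheme above (hedged illustration; this header is consumed by compiler-generated code, which declares variables through __loc_sc__ so that the pasted name selects one of the __storage_* macros defined below):

/* __loc_sc__(__device__, , static)
 *   -> __storage_static__device__ __device__
 *   -> static __var_used__ __device__
 * i.e. a "static __device__" variable becomes an internal-linkage
 * definition kept alive through __var_used__. */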
#if !defined(__storage___device__)
#define __storage___device__ static __var_used__
#endif /* __storage___device__ */

#if !defined(__storage_extern__device__)
#define __storage_extern__device__ static __var_used__
#endif /* __storage_extern__device__ */

#if !defined(__storage_auto__device__)
#define __storage_auto__device__ @@@ COMPILER @@@ ERROR @@@
#endif /* __storage_auto__device__ */

#if !defined(__storage_static__device__)
#define __storage_static__device__ static __var_used__
#endif /* __storage_static__device__ */

#if !defined(__storage___constant__)
#define __storage___constant__ static __var_used__
#endif /* __storage___constant__ */

#if !defined(__storage_extern__constant__)
#define __storage_extern__constant__ static __var_used__
#endif /* __storage_extern__constant__ */

#if !defined(__storage_auto__constant__)
#define __storage_auto__constant__ @@@ COMPILER @@@ ERROR @@@
#endif /* __storage_auto__constant__ */

#if !defined(__storage_static__constant__)
#define __storage_static__constant__ static __var_used__
#endif /* __storage_static__constant__ */

#if !defined(__storage___shared__)
#define __storage___shared__ static __var_used__
#endif /* __storage___shared__ */

#if !defined(__storage_extern__shared__)
#define __storage_extern__shared__ static __var_used__
#endif /* __storage_extern__shared__ */

#if !defined(__storage_auto__shared__)
#define __storage_auto__shared__ static
#endif /* __storage_auto__shared__ */

#if !defined(__storage_static__shared__)
#define __storage_static__shared__ static __var_used__
#endif /* __storage_static__shared__ */

#if !defined(__storage__unsized__shared__)
#define __storage__unsized__shared__ @@@ COMPILER @@@ ERROR @@@
#endif /* __storage__unsized__shared__ */

#if !defined(__storage_extern_unsized__shared__)
#define __storage_extern_unsized__shared__ static __var_used__
#endif /* __storage_extern_unsized__shared__ */

#if !defined(__storage_auto_unsized__shared__)
#define __storage_auto_unsized__shared__ @@@ COMPILER @@@ ERROR @@@
#endif /* __storage_auto_unsized__shared__ */

#if !defined(__storage_static_unsized__shared__)
#define __storage_static_unsized__shared__ @@@ COMPILER @@@ ERROR @@@
#endif /* __storage_static_unsized__shared__ */

#if !defined(__storage___text__)
#define __storage___text__ static __var_used__
#endif /* __storage___text__ */

#if !defined(__storage_extern__text__)
#define __storage_extern__text__ static __var_used__
#endif /* __storage_extern__text__ */

#if !defined(__storage_auto__text__)
#define __storage_auto__text__ @@@ COMPILER @@@ ERROR @@@
#endif /* __storage_auto__text__ */

#if !defined(__storage_static__text__)
#define __storage_static__text__ static __var_used__
#endif /* __storage_static__text__ */

#if !defined(__storage___surf__)
#define __storage___surf__ static __var_used__
#endif /* __storage___surf__ */

#if !defined(__storage_extern__surf__)
#define __storage_extern__surf__ static __var_used__
#endif /* __storage_extern__surf__ */

#if !defined(__storage_auto__surf__)
#define __storage_auto__surf__ @@@ COMPILER @@@ ERROR @@@
#endif /* __storage_auto__surf__ */

#if !defined(__storage_static__surf__)
#define __storage_static__surf__ static __var_used__
#endif /* __storage_static__surf__ */

#endif /* !__STORAGE_CLASS_H__ */

#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__)
#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_STORAGE_CLASS_H__
#endif
miniCUDA124/include/cub/config.cuh
ADDED
@@ -0,0 +1,51 @@
/******************************************************************************
 * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

/**
 * \file
 * Static configuration header for the CUB project.
 */

#pragma once

// For _CCCL_IMPLICIT_SYSTEM_HEADER
#include <cuda/__cccl_config>

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

#include <cub/util_arch.cuh>
#include <cub/util_compiler.cuh>
#include <cub/util_cpp_dialect.cuh>
#include <cub/util_deprecated.cuh>
#include <cub/util_macro.cuh>
#include <cub/util_namespace.cuh>
miniCUDA124/include/cub/cub.cuh
ADDED
@@ -0,0 +1,116 @@
/******************************************************************************
 * Copyright (c) 2011, Duane Merrill. All rights reserved.
 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * (BSD-3-Clause terms identical to those in cub/config.cuh above)
 ******************************************************************************/

/**
|
| 30 |
+
* \file
|
| 31 |
+
* CUB umbrella include file
|
| 32 |
+
*/
|
| 33 |
+
|
| 34 |
+
#pragma once
|
| 35 |
+
|
| 36 |
+
// Static configuration
|
| 37 |
+
#include <cub/config.cuh>
|
| 38 |
+
|
| 39 |
+
#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
|
| 40 |
+
# pragma GCC system_header
|
| 41 |
+
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
|
| 42 |
+
# pragma clang system_header
|
| 43 |
+
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
|
| 44 |
+
# pragma system_header
|
| 45 |
+
#endif // no system header
|
| 46 |
+
|
| 47 |
+
// Block
|
| 48 |
+
#include <cub/block/block_adjacent_difference.cuh>
|
| 49 |
+
#include <cub/block/block_discontinuity.cuh>
|
| 50 |
+
#include <cub/block/block_exchange.cuh>
|
| 51 |
+
#include <cub/block/block_histogram.cuh>
|
| 52 |
+
#include <cub/block/block_load.cuh>
|
| 53 |
+
#include <cub/block/block_merge_sort.cuh>
|
| 54 |
+
#include <cub/block/block_radix_rank.cuh>
|
| 55 |
+
#include <cub/block/block_radix_sort.cuh>
|
| 56 |
+
#include <cub/block/block_reduce.cuh>
|
| 57 |
+
#include <cub/block/block_scan.cuh>
|
| 58 |
+
#include <cub/block/block_store.cuh>
|
| 59 |
+
// #include <cub/block/block_shift.cuh>
|
| 60 |
+
|
| 61 |
+
// Device
|
| 62 |
+
#include <cub/device/device_adjacent_difference.cuh>
|
| 63 |
+
#include <cub/device/device_copy.cuh>
|
| 64 |
+
#include <cub/device/device_histogram.cuh>
|
| 65 |
+
#include <cub/device/device_memcpy.cuh>
|
| 66 |
+
#include <cub/device/device_merge_sort.cuh>
|
| 67 |
+
#include <cub/device/device_partition.cuh>
|
| 68 |
+
#include <cub/device/device_radix_sort.cuh>
|
| 69 |
+
#include <cub/device/device_reduce.cuh>
|
| 70 |
+
#include <cub/device/device_run_length_encode.cuh>
|
| 71 |
+
#include <cub/device/device_scan.cuh>
|
| 72 |
+
#include <cub/device/device_segmented_radix_sort.cuh>
|
| 73 |
+
#include <cub/device/device_segmented_reduce.cuh>
|
| 74 |
+
#include <cub/device/device_segmented_sort.cuh>
|
| 75 |
+
#include <cub/device/device_select.cuh>
|
| 76 |
+
#include <cub/device/device_spmv.cuh>
|
| 77 |
+
|
| 78 |
+
// Grid
|
| 79 |
+
// #include <cub/grid/grid_barrier.cuh>
|
| 80 |
+
#include <cub/grid/grid_even_share.cuh>
|
| 81 |
+
#include <cub/grid/grid_mapping.cuh>
|
| 82 |
+
#include <cub/grid/grid_queue.cuh>
|
| 83 |
+
|
| 84 |
+
// Thread
|
| 85 |
+
#include <cub/thread/thread_load.cuh>
|
| 86 |
+
#include <cub/thread/thread_operators.cuh>
|
| 87 |
+
#include <cub/thread/thread_reduce.cuh>
|
| 88 |
+
#include <cub/thread/thread_scan.cuh>
|
| 89 |
+
#include <cub/thread/thread_store.cuh>
|
| 90 |
+
|
| 91 |
+
// Warp
|
| 92 |
+
#include <cub/warp/warp_exchange.cuh>
|
| 93 |
+
#include <cub/warp/warp_load.cuh>
|
| 94 |
+
#include <cub/warp/warp_merge_sort.cuh>
|
| 95 |
+
#include <cub/warp/warp_reduce.cuh>
|
| 96 |
+
#include <cub/warp/warp_scan.cuh>
|
| 97 |
+
#include <cub/warp/warp_store.cuh>
|
| 98 |
+
|
| 99 |
+
// Iterator
|
| 100 |
+
#include <cub/iterator/arg_index_input_iterator.cuh>
|
| 101 |
+
#include <cub/iterator/cache_modified_input_iterator.cuh>
|
| 102 |
+
#include <cub/iterator/cache_modified_output_iterator.cuh>
|
| 103 |
+
#include <cub/iterator/constant_input_iterator.cuh>
|
| 104 |
+
#include <cub/iterator/counting_input_iterator.cuh>
|
| 105 |
+
#include <cub/iterator/discard_output_iterator.cuh>
|
| 106 |
+
#include <cub/iterator/tex_obj_input_iterator.cuh>
|
| 107 |
+
#include <cub/iterator/tex_ref_input_iterator.cuh>
|
| 108 |
+
#include <cub/iterator/transform_input_iterator.cuh>
|
| 109 |
+
|
| 110 |
+
// Util
|
| 111 |
+
#include <cub/util_allocator.cuh>
|
| 112 |
+
#include <cub/util_debug.cuh>
|
| 113 |
+
#include <cub/util_device.cuh>
|
| 114 |
+
#include <cub/util_ptx.cuh>
|
| 115 |
+
#include <cub/util_temporary_storage.cuh>
|
| 116 |
+
#include <cub/util_type.cuh>
|
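The umbrella header above makes a single include sufficient for any of the primitives listed. As a minimal host-side sketch of the two-phase calling convention used by CUB's device-wide algorithms (the input values are illustrative, and it assumes the device_reduce.cuh header included above provides cub::DeviceReduce::Sum as in mainline CUB):

#include <cub/cub.cuh>
#include <cstdio>

int main()
{
  // Illustrative input: eight integers to sum on the device.
  int h_in[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  int *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, sizeof(h_in));
  cudaMalloc(&d_out, sizeof(int));
  cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

  // First call with a null workspace only queries the required
  // temporary-storage size; the second call performs the reduction.
  void *d_temp = nullptr;
  size_t temp_bytes = 0;
  cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, 8);
  cudaMalloc(&d_temp, temp_bytes);
  cub::DeviceReduce::Sum(d_temp, temp_bytes, d_in, d_out, 8);

  int sum = 0;
  cudaMemcpy(&sum, d_out, sizeof(int), cudaMemcpyDeviceToHost);
  printf("sum = %d\n", sum); // expected: 36

  cudaFree(d_temp);
  cudaFree(d_out);
  cudaFree(d_in);
  return 0;
}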
miniCUDA124/include/cub/util_allocator.cuh
ADDED
@@ -0,0 +1,880 @@
+/******************************************************************************
+ * Copyright (c) 2011, Duane Merrill. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the NVIDIA CORPORATION nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+/******************************************************************************
+ * Simple caching allocator for device memory allocations. The allocator is
+ * thread-safe and capable of managing device allocations on multiple devices.
+ ******************************************************************************/
+
+#pragma once
+
+#include <cub/config.cuh>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+#  pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+#  pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+#  pragma system_header
+#endif // no system header
+
+#include <cub/util_debug.cuh>
+#include <cub/util_namespace.cuh>
+
+#include <map>
+#include <mutex>
+#include <set>
+
+#include <math.h>
+
+CUB_NAMESPACE_BEGIN
+
+
+/**
+ * @addtogroup UtilMgmt
+ * @{
+ */
+
+
+/******************************************************************************
+ * CachingDeviceAllocator (host use)
+ ******************************************************************************/
+
+/**
+ * @brief A simple caching allocator for device memory allocations.
+ *
+ * @par Overview
+ * The allocator is thread-safe and stream-safe and is capable of managing cached
+ * device allocations on multiple devices. It behaves as follows:
+ *
+ * @par
+ * - Allocations from the allocator are associated with an @p active_stream. Once freed,
+ *   the allocation becomes available immediately for reuse within the @p active_stream
+ *   with which it was associated during allocation, and it becomes available for
+ *   reuse within other streams when all prior work submitted to @p active_stream has completed.
+ * - Allocations are categorized and cached by bin size. A new allocation request of
+ *   a given size will only consider cached allocations within the corresponding bin.
+ * - Bin limits progress geometrically in accordance with the growth factor
+ *   @p bin_growth provided during construction. Unused device allocations within
+ *   a larger bin cache are not reused for allocation requests that categorize to
+ *   smaller bin sizes.
+ * - Allocation requests below ( @p bin_growth ^ @p min_bin ) are rounded up to
+ *   ( @p bin_growth ^ @p min_bin ).
+ * - Allocations above ( @p bin_growth ^ @p max_bin ) are not rounded up to the nearest
+ *   bin and are simply freed when they are deallocated instead of being returned
+ *   to a bin-cache.
+ * - If the total storage of cached allocations on a given device will exceed
+ *   @p max_cached_bytes, allocations for that device are simply freed when they are
+ *   deallocated instead of being returned to their bin-cache.
+ *
+ * @par
+ * For example, the default-constructed CachingDeviceAllocator is configured with:
+ * - @p bin_growth       = 8
+ * - @p min_bin          = 3
+ * - @p max_bin          = 7
+ * - @p max_cached_bytes = 6MB - 1B
+ *
+ * @par
+ * which delineates five bin-sizes: 512B, 4KB, 32KB, 256KB, and 2MB,
+ * and sets a maximum of 6,291,455 cached bytes per device.
+ */
+struct CachingDeviceAllocator
+{
+
+  //---------------------------------------------------------------------
+  // Constants
+  //---------------------------------------------------------------------
+
+  /// Out-of-bounds bin
+  static constexpr unsigned int INVALID_BIN = (unsigned int) -1;
+
+  /// Invalid size
+  static constexpr size_t INVALID_SIZE = (size_t) -1;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
+
+  /// Invalid device ordinal
+  static constexpr int INVALID_DEVICE_ORDINAL = -1;
+
+  //---------------------------------------------------------------------
+  // Type definitions and helper types
+  //---------------------------------------------------------------------
+
+  /**
+   * Descriptor for device memory allocations
+   */
+  struct BlockDescriptor
+  {
+    // Device pointer
+    void *d_ptr;
+
+    // Size of allocation in bytes
+    size_t bytes;
+
+    // Bin enumeration
+    unsigned int bin;
+
+    // Device ordinal
+    int device;
+
+    // Associated stream
+    cudaStream_t associated_stream;
+
+    // Signaled when the associated stream has run to the point at which this block was freed
+    cudaEvent_t ready_event;
+
+    // Constructor (suitable for searching maps for a specific block, given its pointer and
+    // device)
+    BlockDescriptor(void *d_ptr, int device)
+        : d_ptr(d_ptr)
+        , bytes(0)
+        , bin(INVALID_BIN)
+        , device(device)
+        , associated_stream(0)
+        , ready_event(0)
+    {}
+
+    // Constructor (suitable for searching maps for a range of suitable blocks, given a device)
+    BlockDescriptor(int device)
+        : d_ptr(NULL)
+        , bytes(0)
+        , bin(INVALID_BIN)
+        , device(device)
+        , associated_stream(0)
+        , ready_event(0)
+    {}
+
+    // Comparison functor for comparing device pointers
+    static bool PtrCompare(const BlockDescriptor &a, const BlockDescriptor &b)
+    {
+      if (a.device == b.device)
+        return (a.d_ptr < b.d_ptr);
+      else
+        return (a.device < b.device);
+    }
+
+    // Comparison functor for comparing allocation sizes
+    static bool SizeCompare(const BlockDescriptor &a, const BlockDescriptor &b)
+    {
+      if (a.device == b.device)
+        return (a.bytes < b.bytes);
+      else
+        return (a.device < b.device);
+    }
+  };
+
+  /// BlockDescriptor comparator function interface
+  typedef bool (*Compare)(const BlockDescriptor &, const BlockDescriptor &);
+
+  class TotalBytes
+  {
+  public:
+    size_t free;
+    size_t live;
+    TotalBytes() { free = live = 0; }
+  };
+
+  /// Set type for cached blocks (ordered by size)
+  typedef std::multiset<BlockDescriptor, Compare> CachedBlocks;
+
+  /// Set type for live blocks (ordered by ptr)
+  typedef std::multiset<BlockDescriptor, Compare> BusyBlocks;
+
+  /// Map type from device ordinal to the number of bytes cached on that device
+  typedef std::map<int, TotalBytes> GpuCachedBytes;
+
+
+  //---------------------------------------------------------------------
+  // Utility functions
+  //---------------------------------------------------------------------
+
+  /**
+   * Integer pow function for unsigned base and exponent
+   */
+  static unsigned int IntPow(
+    unsigned int base,
+    unsigned int exp)
+  {
+    unsigned int retval = 1;
+    while (exp > 0)
+    {
+      if (exp & 1) {
+        retval = retval * base; // multiply the result by the current base
+      }
+      base = base * base;       // square the base
+      exp  = exp >> 1;          // divide the exponent in half
+    }
+    return retval;
+  }
+
+
+  /**
+   * Round up to the nearest power of the given base
+   */
+  void NearestPowerOf(
+    unsigned int &power,
+    size_t &rounded_bytes,
+    unsigned int base,
+    size_t value)
+  {
+    power         = 0;
+    rounded_bytes = 1;
+
+    if (value * base < value)
+    {
+      // Overflow
+      power         = sizeof(size_t) * 8;
+      rounded_bytes = size_t(0) - 1;
+      return;
+    }
+
+    while (rounded_bytes < value)
+    {
+      rounded_bytes *= base;
+      power++;
+    }
+  }
+
+  //---------------------------------------------------------------------
+  // Fields
+  //---------------------------------------------------------------------
+
+  /// Mutex for thread-safety
+  std::mutex mutex;
+
+  /// Geometric growth factor for bin-sizes
+  unsigned int bin_growth;
+
+  /// Minimum bin enumeration
+  unsigned int min_bin;
+
+  /// Maximum bin enumeration
+  unsigned int max_bin;
+
+  /// Minimum bin size
+  size_t min_bin_bytes;
+
+  /// Maximum bin size
+  size_t max_bin_bytes;
+
+  /// Maximum aggregate cached bytes per device
+  size_t max_cached_bytes;
+
+  /// Whether or not to skip a call to FreeAllCached() when the destructor is called.
+  /// (The CUDA runtime may have already shut down for statically declared allocators.)
+  const bool skip_cleanup;
+
+  /// Whether or not to print (de)allocation events to stdout
+  bool debug;
+
+  /// Map of device ordinal to aggregate cached bytes on that device
+  GpuCachedBytes cached_bytes;
+
+  /// Set of cached device allocations available for reuse
+  CachedBlocks cached_blocks;
+
+  /// Set of live device allocations currently in use
+  BusyBlocks live_blocks;
+
+#endif // DOXYGEN_SHOULD_SKIP_THIS
+
+  //---------------------------------------------------------------------
+  // Methods
+  //---------------------------------------------------------------------
+
+  /**
+   * @brief Constructor.
+   *
+   * @param bin_growth
+   *   Geometric growth factor for bin-sizes
+   *
+   * @param min_bin
+   *   Minimum bin (default is bin_growth ^ 1)
+   *
+   * @param max_bin
+   *   Maximum bin (default is no max bin)
+   *
+   * @param max_cached_bytes
+   *   Maximum aggregate cached bytes per device (default is no limit)
+   *
+   * @param skip_cleanup
+   *   Whether or not to skip a call to @p FreeAllCached() when the destructor is called
+   *   (default is to deallocate)
+   *
+   * @param debug
+   *   Whether or not to print (de)allocation events to stdout (default is no output)
+   */
+  CachingDeviceAllocator(unsigned int bin_growth,
+                         unsigned int min_bin    = 1,
+                         unsigned int max_bin    = INVALID_BIN,
+                         size_t max_cached_bytes = INVALID_SIZE,
+                         bool skip_cleanup       = false,
+                         bool debug              = false)
+      : bin_growth(bin_growth)
+      , min_bin(min_bin)
+      , max_bin(max_bin)
+      , min_bin_bytes(IntPow(bin_growth, min_bin))
+      , max_bin_bytes(IntPow(bin_growth, max_bin))
+      , max_cached_bytes(max_cached_bytes)
+      , skip_cleanup(skip_cleanup)
+      , debug(debug)
+      , cached_blocks(BlockDescriptor::SizeCompare)
+      , live_blocks(BlockDescriptor::PtrCompare)
+  {}
+
+
+  /**
+   * @brief Default constructor.
+   *
+   * Configured with:
+   * @par
+   * - @p bin_growth       = 8
+   * - @p min_bin          = 3
+   * - @p max_bin          = 7
+   * - @p max_cached_bytes = ( @p bin_growth ^ @p max_bin ) * 3 - 1 = 6,291,455 bytes
+   *
+   * which delineates five bin-sizes: 512B, 4KB, 32KB, 256KB, and 2MB, and
+   * sets a maximum of 6,291,455 cached bytes per device.
+   */
+  CachingDeviceAllocator(
+    bool skip_cleanup = false,
+    bool debug        = false)
+      : bin_growth(8)
+      , min_bin(3)
+      , max_bin(7)
+      , min_bin_bytes(IntPow(bin_growth, min_bin))
+      , max_bin_bytes(IntPow(bin_growth, max_bin))
+      , max_cached_bytes((max_bin_bytes * 3) - 1)
+      , skip_cleanup(skip_cleanup)
+      , debug(debug)
+      , cached_blocks(BlockDescriptor::SizeCompare)
+      , live_blocks(BlockDescriptor::PtrCompare)
+  {}
+
+
+  /**
+   * @brief Sets the limit on the number of bytes this allocator is allowed to cache per device.
+   *
+   * Changing the ceiling of cached bytes does not cause any allocations (in-use or
+   * cached-in-reserve) to be freed. See \p FreeAllCached().
+   */
+  cudaError_t SetMaxCachedBytes(size_t max_cached_bytes_)
+  {
+    // Lock
+    mutex.lock();
+
+    if (debug) _CubLog("Changing max_cached_bytes (%lld -> %lld)\n", (long long) this->max_cached_bytes, (long long) max_cached_bytes_);
+
+    this->max_cached_bytes = max_cached_bytes_;
+
+    // Unlock
+    mutex.unlock();
+
+    return cudaSuccess;
+  }
+
+  /**
+   * @brief Provides a suitable allocation of device memory for the given size on the specified
+   *        device.
+   *
+   * Once freed, the allocation becomes available immediately for reuse within the
+   * @p active_stream with which it was associated during allocation, and it becomes available
+   * for reuse within other streams when all prior work submitted to @p active_stream has
+   * completed.
+   *
+   * @param[in] device
+   *   Device on which to place the allocation
+   *
+   * @param[out] d_ptr
+   *   Reference to pointer to the allocation
+   *
+   * @param[in] bytes
+   *   Minimum number of bytes for the allocation
+   *
+   * @param[in] active_stream
+   *   The stream to be associated with this allocation
+   */
+  cudaError_t
+  DeviceAllocate(int device, void **d_ptr, size_t bytes, cudaStream_t active_stream = 0)
+  {
+    *d_ptr                = NULL;
+    int entrypoint_device = INVALID_DEVICE_ORDINAL;
+    cudaError_t error     = cudaSuccess;
+
+    if (device == INVALID_DEVICE_ORDINAL)
+    {
+      error = CubDebug(cudaGetDevice(&entrypoint_device));
+      if (cudaSuccess != error)
+      {
+        return error;
+      }
+
+      device = entrypoint_device;
+    }
+
+    // Create a block descriptor for the requested allocation
+    bool found = false;
+    BlockDescriptor search_key(device);
+    search_key.associated_stream = active_stream;
+    NearestPowerOf(search_key.bin, search_key.bytes, bin_growth, bytes);
+
+    if (search_key.bin > max_bin)
+    {
+      // Bin is greater than our maximum bin: allocate the request
+      // exactly and give out-of-bounds bin. It will not be cached
+      // for reuse when returned.
+      search_key.bin   = INVALID_BIN;
+      search_key.bytes = bytes;
+    }
+    else
+    {
+      // Search for a suitable cached allocation: lock
+      mutex.lock();
+
+      if (search_key.bin < min_bin)
+      {
+        // Bin is less than minimum bin: round up
+        search_key.bin   = min_bin;
+        search_key.bytes = min_bin_bytes;
+      }
+
+      // Iterate through the range of cached blocks on the same device in the same bin
+      CachedBlocks::iterator block_itr = cached_blocks.lower_bound(search_key);
+      while ((block_itr != cached_blocks.end())
+             && (block_itr->device == device)
+             && (block_itr->bin == search_key.bin))
+      {
+        // To prevent races with reusing blocks returned by the host but still
+        // in use by the device, only consider cached blocks that are
+        // either (from the active stream) or (from an idle stream)
+        bool is_reusable = false;
+        if (active_stream == block_itr->associated_stream)
+        {
+          is_reusable = true;
+        }
+        else
+        {
+          const cudaError_t event_status = cudaEventQuery(block_itr->ready_event);
+          if (event_status != cudaErrorNotReady)
+          {
+            CubDebug(event_status);
+            is_reusable = true;
+          }
+        }
+
+        if (is_reusable)
+        {
+          // Reuse existing cache block. Insert into live blocks.
+          found                        = true;
+          search_key                   = *block_itr;
+          search_key.associated_stream = active_stream;
+          live_blocks.insert(search_key);
+
+          // Remove from free blocks
+          cached_bytes[device].free -= search_key.bytes;
+          cached_bytes[device].live += search_key.bytes;
+
+          if (debug) _CubLog("\tDevice %d reused cached block at %p (%lld bytes) for stream %lld (previously associated with stream %lld).\n",
+                             device, search_key.d_ptr, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) block_itr->associated_stream);
+
+          cached_blocks.erase(block_itr);
+
+          break;
+        }
+        block_itr++;
+      }
+
+      // Done searching: unlock
+      mutex.unlock();
+    }
+
+    // Allocate the block if necessary
+    if (!found)
+    {
+      // Set runtime's current device to specified device (entrypoint may not be set)
+      if (device != entrypoint_device)
+      {
+        error = CubDebug(cudaGetDevice(&entrypoint_device));
+        if (cudaSuccess != error)
+        {
+          return error;
+        }
+
+        error = CubDebug(cudaSetDevice(device));
+        if (cudaSuccess != error)
+        {
+          return error;
+        }
+      }
+
+      // Attempt to allocate
+      error = CubDebug(cudaMalloc(&search_key.d_ptr, search_key.bytes));
+      if (error == cudaErrorMemoryAllocation)
+      {
+        // The allocation attempt failed: free all cached blocks on device and retry
+        if (debug) _CubLog("\tDevice %d failed to allocate %lld bytes for stream %lld, retrying after freeing cached allocations",
+                           device, (long long) search_key.bytes, (long long) search_key.associated_stream);
+
+        error = cudaSuccess;  // Reset the error we will return
+        cudaGetLastError();   // Reset CUDART's error
+
+        // Lock
+        mutex.lock();
+
+        // Iterate the range of free blocks on the same device
+        BlockDescriptor free_key(device);
+        CachedBlocks::iterator block_itr = cached_blocks.lower_bound(free_key);
+
+        while ((block_itr != cached_blocks.end()) && (block_itr->device == device))
+        {
+          // No need to worry about synchronization with the device: cudaFree is
+          // blocking and will synchronize across all kernels executing
+          // on the current device
+
+          // Free device memory and destroy stream event.
+          error = CubDebug(cudaFree(block_itr->d_ptr));
+          if (cudaSuccess != error)
+          {
+            break;
+          }
+
+          error = CubDebug(cudaEventDestroy(block_itr->ready_event));
+          if (cudaSuccess != error)
+          {
+            break;
+          }
+
+          // Reduce balance and erase entry
+          cached_bytes[device].free -= block_itr->bytes;
+
+          if (debug) _CubLog("\tDevice %d freed %lld bytes.\n\t\t  %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n",
+                             device, (long long) block_itr->bytes, (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live);
+
+          block_itr = cached_blocks.erase(block_itr);
+        }
+
+        // Unlock
+        mutex.unlock();
+
+        // Return under error
+        if (error) return error;
+
+        // Try to allocate again
+        error = CubDebug(cudaMalloc(&search_key.d_ptr, search_key.bytes));
+        if (cudaSuccess != error)
+        {
+          return error;
+        }
+      }
+
+      // Create ready event
+      error =
+        CubDebug(cudaEventCreateWithFlags(&search_key.ready_event, cudaEventDisableTiming));
+
+      if (cudaSuccess != error)
+      {
+        return error;
+      }
+
+      // Insert into live blocks
+      mutex.lock();
+      live_blocks.insert(search_key);
+      cached_bytes[device].live += search_key.bytes;
+      mutex.unlock();
+
+      if (debug) _CubLog("\tDevice %d allocated new device block at %p (%lld bytes associated with stream %lld).\n",
+                         device, search_key.d_ptr, (long long) search_key.bytes, (long long) search_key.associated_stream);
+
+      // Attempt to revert back to previous device if necessary
+      if ((entrypoint_device != INVALID_DEVICE_ORDINAL) && (entrypoint_device != device))
+      {
+        error = CubDebug(cudaSetDevice(entrypoint_device));
+        if (cudaSuccess != error)
+        {
+          return error;
+        }
+      }
+    }
+
+    // Copy device pointer to output parameter
+    *d_ptr = search_key.d_ptr;
+
+    if (debug) _CubLog("\t\t%lld available blocks cached (%lld bytes), %lld live blocks outstanding (%lld bytes).\n",
+                       (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live);
+
+    return error;
+  }
+
+  /**
+   * @brief Provides a suitable allocation of device memory for the given size on the current
+   *        device.
+   *
+   * Once freed, the allocation becomes available immediately for reuse within the
+   * @p active_stream with which it was associated during allocation, and it becomes available
+   * for reuse within other streams when all prior work submitted to @p active_stream has
+   * completed.
+   *
+   * @param[out] d_ptr
+   *   Reference to pointer to the allocation
+   *
+   * @param[in] bytes
+   *   Minimum number of bytes for the allocation
+   *
+   * @param[in] active_stream
+   *   The stream to be associated with this allocation
+   */
+  cudaError_t DeviceAllocate(void **d_ptr, size_t bytes, cudaStream_t active_stream = 0)
+  {
+    return DeviceAllocate(INVALID_DEVICE_ORDINAL, d_ptr, bytes, active_stream);
+  }
+
+  /**
+   * @brief Frees a live allocation of device memory on the specified device, returning it to the
+   *        allocator.
+   *
+   * Once freed, the allocation becomes available immediately for reuse within the
+   * @p active_stream with which it was associated during allocation, and it becomes
+   * available for reuse within other streams when all prior work submitted to @p active_stream
+   * has completed.
+   */
+  cudaError_t DeviceFree(
+    int device,
+    void* d_ptr)
+  {
+    int entrypoint_device = INVALID_DEVICE_ORDINAL;
+    cudaError_t error     = cudaSuccess;
+
+    if (device == INVALID_DEVICE_ORDINAL)
+    {
+      error = CubDebug(cudaGetDevice(&entrypoint_device));
+      if (cudaSuccess != error)
+      {
+        return error;
+      }
+      device = entrypoint_device;
+    }
+
+    // Lock
+    mutex.lock();
+
+    // Find corresponding block descriptor
+    bool recached = false;
+    BlockDescriptor search_key(d_ptr, device);
+    BusyBlocks::iterator block_itr = live_blocks.find(search_key);
+    if (block_itr != live_blocks.end())
+    {
+      // Remove from live blocks
+      search_key = *block_itr;
+      live_blocks.erase(block_itr);
+      cached_bytes[device].live -= search_key.bytes;
+
+      // Keep the returned allocation if bin is valid and we won't exceed the max cached threshold
+      if ((search_key.bin != INVALID_BIN) && (cached_bytes[device].free + search_key.bytes <= max_cached_bytes))
+      {
+        // Insert returned allocation into free blocks
+        recached = true;
+        cached_blocks.insert(search_key);
+        cached_bytes[device].free += search_key.bytes;
+
+        if (debug) _CubLog("\tDevice %d returned %lld bytes from associated stream %lld.\n\t\t %lld available blocks cached (%lld bytes), %lld live blocks outstanding (%lld bytes).\n",
+                           device, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) cached_blocks.size(),
+                           (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live);
+      }
+    }
+
+    // Unlock
+    mutex.unlock();
+
+    // First set to specified device (entrypoint may not be set)
+    if (device != entrypoint_device)
+    {
+      error = CubDebug(cudaGetDevice(&entrypoint_device));
+      if (cudaSuccess != error)
+      {
+        return error;
+      }
+
+      error = CubDebug(cudaSetDevice(device));
+      if (cudaSuccess != error)
+      {
+        return error;
+      }
+    }
+
+    if (recached)
+    {
+      // Insert the ready event in the associated stream (must have current device set properly)
+      error = CubDebug(cudaEventRecord(search_key.ready_event, search_key.associated_stream));
+      if (cudaSuccess != error)
+      {
+        return error;
+      }
+    }
+
+    if (!recached)
+    {
+      // Free the allocation from the runtime and clean up the event.
+      error = CubDebug(cudaFree(d_ptr));
+      if (cudaSuccess != error)
+      {
+        return error;
+      }
+
+      error = CubDebug(cudaEventDestroy(search_key.ready_event));
+      if (cudaSuccess != error)
+      {
+        return error;
+      }
+
+      if (debug) _CubLog("\tDevice %d freed %lld bytes from associated stream %lld.\n\t\t  %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n",
+                         device, (long long) search_key.bytes, (long long) search_key.associated_stream, (long long) cached_blocks.size(), (long long) cached_bytes[device].free, (long long) live_blocks.size(), (long long) cached_bytes[device].live);
+    }
+
+    // Reset device
+    if ((entrypoint_device != INVALID_DEVICE_ORDINAL) && (entrypoint_device != device))
+    {
+      error = CubDebug(cudaSetDevice(entrypoint_device));
+      if (cudaSuccess != error)
+      {
+        return error;
+      }
+    }
+
+    return error;
+  }
+
+  /**
+   * @brief Frees a live allocation of device memory on the current device, returning it to the
+   *        allocator.
+   *
+   * Once freed, the allocation becomes available immediately for reuse within the
+   * @p active_stream with which it was associated during allocation, and it becomes available
+   * for reuse within other streams when all prior work submitted to @p active_stream has
+   * completed.
+   */
+  cudaError_t DeviceFree(
+    void* d_ptr)
+  {
+    return DeviceFree(INVALID_DEVICE_ORDINAL, d_ptr);
+  }
+
+
+  /**
+   * @brief Frees all cached device allocations on all devices
+   */
+  cudaError_t FreeAllCached()
+  {
+    cudaError_t error     = cudaSuccess;
+    int entrypoint_device = INVALID_DEVICE_ORDINAL;
+    int current_device    = INVALID_DEVICE_ORDINAL;
+
+    mutex.lock();
+
+    while (!cached_blocks.empty())
+    {
+      // Get first block
+      CachedBlocks::iterator begin = cached_blocks.begin();
+
+      // Get entry-point device ordinal if necessary
+      if (entrypoint_device == INVALID_DEVICE_ORDINAL)
+      {
+        error = CubDebug(cudaGetDevice(&entrypoint_device));
+        if (cudaSuccess != error)
+        {
+          break;
+        }
+      }
+
+      // Set current device ordinal if necessary
+      if (begin->device != current_device)
+      {
+        error = CubDebug(cudaSetDevice(begin->device));
+        if (cudaSuccess != error)
+        {
+          break;
+        }
+        current_device = begin->device;
+      }
+
+      // Free device memory
+      error = CubDebug(cudaFree(begin->d_ptr));
+      if (cudaSuccess != error)
+      {
+        break;
+      }
+
+      error = CubDebug(cudaEventDestroy(begin->ready_event));
+      if (cudaSuccess != error)
+      {
+        break;
+      }
+
+      // Reduce balance and erase entry
+      const size_t block_bytes = begin->bytes;
+      cached_bytes[current_device].free -= block_bytes;
+      cached_blocks.erase(begin);
+
+      if (debug) _CubLog("\tDevice %d freed %lld bytes.\n\t\t  %lld available blocks cached (%lld bytes), %lld live blocks (%lld bytes) outstanding.\n",
+                         current_device, (long long) block_bytes, (long long) cached_blocks.size(), (long long) cached_bytes[current_device].free, (long long) live_blocks.size(), (long long) cached_bytes[current_device].live);
+    }
+
+    mutex.unlock();
+
+    // Attempt to revert back to entry-point device if necessary
+    if (entrypoint_device != INVALID_DEVICE_ORDINAL)
+    {
+      error = CubDebug(cudaSetDevice(entrypoint_device));
+      if (cudaSuccess != error)
+      {
+        return error;
+      }
+    }
+
+    return error;
+  }
+
+
+  /**
+   * @brief Destructor
+   */
+  virtual ~CachingDeviceAllocator()
+  {
+    if (!skip_cleanup)
+      FreeAllCached();
+  }
+
+};
+
+
+
+
+/** @} */ // end group UtilMgmt
+
+CUB_NAMESPACE_END
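The overview comment above fully specifies the allocator's observable behavior. A minimal host-side usage sketch under the default configuration (the stream handle and request sizes are illustrative):

#include <cub/util_allocator.cuh>

int main()
{
  // Default construction: bin_growth = 8, min_bin = 3, max_bin = 7, so the
  // bins run from 8^3 = 512B up to 8^7 = 2MB, and at most
  // 3 * 2MB - 1 = 6,291,455 bytes stay cached per device.
  cub::CachingDeviceAllocator allocator;

  cudaStream_t stream;
  cudaStreamCreate(&stream);

  // A 1000-byte request is rounded up to the 4KB bin (8^4 bytes).
  void *d_buf = nullptr;
  allocator.DeviceAllocate(&d_buf, 1000, stream);

  // ... launch kernels on `stream` that use d_buf ...

  // DeviceFree returns the block to the bin cache: it is reusable on
  // `stream` immediately, and on other streams only once prior work on
  // `stream` completes (tracked via the block's ready_event).
  allocator.DeviceFree(d_buf);

  // A 2000-byte request also lands in the 4KB bin, so on the same stream
  // it reuses the cached block instead of calling cudaMalloc again.
  void *d_buf2 = nullptr;
  allocator.DeviceAllocate(&d_buf2, 2000, stream);
  allocator.DeviceFree(d_buf2);

  cudaStreamDestroy(stream);
  return 0; // the destructor calls FreeAllCached() unless skip_cleanup was set
}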
miniCUDA124/include/cub/util_arch.cuh
ADDED
@@ -0,0 +1,174 @@
+/******************************************************************************
+ * Copyright (c) 2011, Duane Merrill. All rights reserved.
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the NVIDIA CORPORATION nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+/**
+ * \file
+ * Static architectural properties by SM version.
+ */
+
+#pragma once
+
+#include <cub/config.cuh>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+#  pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+#  pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+#  pragma system_header
+#endif // no system header
+
+#include <cub/util_cpp_dialect.cuh>
+#include <cub/util_macro.cuh>
+#include <cub/util_namespace.cuh>
+
+// Legacy include; this functionality used to be defined in here.
+#include <cub/detail/detect_cuda_runtime.cuh>
+
+CUB_NAMESPACE_BEGIN
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
+
+// \deprecated [Since 2.1.0]
+#define CUB_USE_COOPERATIVE_GROUPS
+
+/// In device code, CUB_PTX_ARCH expands to the PTX version for which we are
+/// compiling. In host code, CUB_PTX_ARCH's value is implementation defined.
+#ifndef CUB_PTX_ARCH
+#if defined(_NVHPC_CUDA)
+// __NVCOMPILER_CUDA_ARCH__ is the target PTX version, and is defined
+// when compiling both host code and device code. Currently, only one
+// PTX version can be targeted.
+#define CUB_PTX_ARCH __NVCOMPILER_CUDA_ARCH__
+#elif !defined(__CUDA_ARCH__)
+#define CUB_PTX_ARCH 0
+#else
+#define CUB_PTX_ARCH __CUDA_ARCH__
+#endif
+#endif
+
+// These definitions were intended for internal use only and are now obsolete.
+// If you relied on them, consider porting your code to use the functionality
+// in libcu++'s <nv/target> header.
+// For a temporary workaround, define CUB_PROVIDE_LEGACY_ARCH_MACROS to make
+// them available again. These should be considered deprecated and will be
+// fully removed in a future version.
+#ifdef CUB_PROVIDE_LEGACY_ARCH_MACROS
+#ifndef CUB_IS_DEVICE_CODE
+#if defined(_NVHPC_CUDA)
+#define CUB_IS_DEVICE_CODE __builtin_is_device_code()
+#define CUB_IS_HOST_CODE (!__builtin_is_device_code())
+#define CUB_INCLUDE_DEVICE_CODE 1
+#define CUB_INCLUDE_HOST_CODE 1
+#elif CUB_PTX_ARCH > 0
+#define CUB_IS_DEVICE_CODE 1
+#define CUB_IS_HOST_CODE 0
+#define CUB_INCLUDE_DEVICE_CODE 1
+#define CUB_INCLUDE_HOST_CODE 0
+#else
+#define CUB_IS_DEVICE_CODE 0
+#define CUB_IS_HOST_CODE 1
+#define CUB_INCLUDE_DEVICE_CODE 0
+#define CUB_INCLUDE_HOST_CODE 1
+#endif
+#endif
+#endif // CUB_PROVIDE_LEGACY_ARCH_MACROS
+
+/// Maximum number of devices supported.
+#ifndef CUB_MAX_DEVICES
+#define CUB_MAX_DEVICES (128)
+#endif
+
+static_assert(CUB_MAX_DEVICES > 0, "CUB_MAX_DEVICES must be greater than 0.");
+
+
+/// Number of threads per warp
+#ifndef CUB_LOG_WARP_THREADS
+#define CUB_LOG_WARP_THREADS(unused) (5)
+#define CUB_WARP_THREADS(unused) (1 << CUB_LOG_WARP_THREADS(0))
+
+#define CUB_PTX_WARP_THREADS CUB_WARP_THREADS(0)
+#define CUB_PTX_LOG_WARP_THREADS CUB_LOG_WARP_THREADS(0)
+#endif
+
+
+/// Number of smem banks
+#ifndef CUB_LOG_SMEM_BANKS
+#define CUB_LOG_SMEM_BANKS(unused) (5)
+#define CUB_SMEM_BANKS(unused) (1 << CUB_LOG_SMEM_BANKS(0))
+
+#define CUB_PTX_LOG_SMEM_BANKS CUB_LOG_SMEM_BANKS(0)
+#define CUB_PTX_SMEM_BANKS CUB_SMEM_BANKS
+#endif
+
+
+/// Oversubscription factor
+#ifndef CUB_SUBSCRIPTION_FACTOR
+#define CUB_SUBSCRIPTION_FACTOR(unused) (5)
+#define CUB_PTX_SUBSCRIPTION_FACTOR CUB_SUBSCRIPTION_FACTOR(0)
+#endif
+
+
+/// Prefer padding overhead vs X-way conflicts greater than this threshold
+#ifndef CUB_PREFER_CONFLICT_OVER_PADDING
+#define CUB_PREFER_CONFLICT_OVER_PADDING(unused) (1)
+#define CUB_PTX_PREFER_CONFLICT_OVER_PADDING CUB_PREFER_CONFLICT_OVER_PADDING(0)
+#endif
+
+
+template <
+  int NOMINAL_4B_BLOCK_THREADS,
+  int NOMINAL_4B_ITEMS_PER_THREAD,
+  typename T>
+struct RegBoundScaling
+{
+  enum {
+    ITEMS_PER_THREAD = CUB_MAX(1, NOMINAL_4B_ITEMS_PER_THREAD * 4 / CUB_MAX(4, sizeof(T))),
+    BLOCK_THREADS    = CUB_MIN(NOMINAL_4B_BLOCK_THREADS, (((1024 * 48) / (sizeof(T) * ITEMS_PER_THREAD)) + 31) / 32 * 32),
+  };
+};
+
+
+template <
+  int NOMINAL_4B_BLOCK_THREADS,
+  int NOMINAL_4B_ITEMS_PER_THREAD,
+  typename T>
+struct MemBoundScaling
+{
+  enum {
+    ITEMS_PER_THREAD = CUB_MAX(1, CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T), NOMINAL_4B_ITEMS_PER_THREAD * 2)),
+    BLOCK_THREADS    = CUB_MIN(NOMINAL_4B_BLOCK_THREADS, (((1024 * 48) / (sizeof(T) * ITEMS_PER_THREAD)) + 31) / 32 * 32),
+  };
+};
+
+
+
+
+#endif // Do not document
+
+CUB_NAMESPACE_END
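To make the CUB_PTX_ARCH behavior above concrete, a small sketch compiled with nvcc (the kernel is hypothetical, not part of this header):

#include <cub/util_arch.cuh>
#include <cstdio>

__global__ void report_arch()
{
  // In the device-code pass, __CUDA_ARCH__ is defined, so CUB_PTX_ARCH is
  // the PTX version being compiled for, e.g. 800 when building for sm_80.
  if (threadIdx.x == 0)
    printf("device CUB_PTX_ARCH = %d\n", CUB_PTX_ARCH);
}

int main()
{
  // In nvcc's host pass, __CUDA_ARCH__ is undefined, so CUB_PTX_ARCH falls
  // back to 0 here (its host-side value is implementation defined in general).
  printf("host CUB_PTX_ARCH = %d, warp size = %d\n",
         CUB_PTX_ARCH, CUB_PTX_WARP_THREADS);
  report_arch<<<1, 32>>>();
  cudaDeviceSynchronize();
  return 0;
}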
miniCUDA124/include/cub/util_compiler.cuh
ADDED
@@ -0,0 +1,92 @@
/******************************************************************************
|
| 2 |
+
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* Redistribution and use in source and binary forms, with or without
|
| 5 |
+
* modification, are permitted provided that the following conditions are met:
|
| 6 |
+
* * Redistributions of source code must retain the above copyright
|
| 7 |
+
* notice, this list of conditions and the following disclaimer.
|
| 8 |
+
* * Redistributions in binary form must reproduce the above copyright
|
| 9 |
+
* notice, this list of conditions and the following disclaimer in the
|
| 10 |
+
* documentation and/or other materials provided with the distribution.
|
| 11 |
+
* * Neither the name of the NVIDIA CORPORATION nor the
|
| 12 |
+
* names of its contributors may be used to endorse or promote products
|
| 13 |
+
* derived from this software without specific prior written permission.
|
| 14 |
+
*
|
| 15 |
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 16 |
+
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 17 |
+
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 18 |
+
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
|
| 19 |
+
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
| 20 |
+
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
| 21 |
+
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
| 22 |
+
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 23 |
+
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 24 |
+
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 25 |
+
*
|
| 26 |
+
******************************************************************************/
|
| 27 |
+
|
| 28 |
+
/**
|
| 29 |
+
* \file
|
| 30 |
+
* Detect compiler information.
|
| 31 |
+
*/
|
| 32 |
+
|
| 33 |
+
#pragma once
|
| 34 |
+
|
| 35 |
+
// For _CCCL_IMPLICIT_SYSTEM_HEADER
|
| 36 |
+
#include <cuda/__cccl_config>
|
| 37 |
+
|
| 38 |
+
#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
|
| 39 |
+
# pragma GCC system_header
|
| 40 |
+
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
|
| 41 |
+
# pragma clang system_header
|
| 42 |
+
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
|
| 43 |
+
# pragma system_header
|
| 44 |
+
#endif // no system header
|
| 45 |
+
|
| 46 |
+
// enumerate host compilers we know about
|
| 47 |
+
#define CUB_HOST_COMPILER_UNKNOWN 0
|
| 48 |
+
#define CUB_HOST_COMPILER_MSVC 1
|
| 49 |
+
#define CUB_HOST_COMPILER_GCC 2
|
| 50 |
+
#define CUB_HOST_COMPILER_CLANG 3
|
| 51 |
+
|
| 52 |
+
// enumerate device compilers we know about
|
| 53 |
+
#define CUB_DEVICE_COMPILER_UNKNOWN 0
|
| 54 |
+
#define CUB_DEVICE_COMPILER_MSVC 1
|
| 55 |
+
#define CUB_DEVICE_COMPILER_GCC 2
|
| 56 |
+
#define CUB_DEVICE_COMPILER_NVCC 3
|
| 57 |
+
#define CUB_DEVICE_COMPILER_CLANG 4
|
| 58 |
+
|
| 59 |
+
// figure out which host compiler we're using
|
| 60 |
+
#if defined(_MSC_VER)
|
| 61 |
+
# define CUB_HOST_COMPILER CUB_HOST_COMPILER_MSVC
|
| 62 |
+
# define CUB_MSVC_VERSION _MSC_VER
|
| 63 |
+
# define CUB_MSVC_VERSION_FULL _MSC_FULL_VER
|
| 64 |
+
#elif defined(__clang__)
|
| 65 |
+
# define CUB_HOST_COMPILER CUB_HOST_COMPILER_CLANG
|
| 66 |
+
# define CUB_CLANG_VERSION \
|
| 67 |
+
(__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
|
| 68 |
+
#elif defined(__GNUC__)
|
| 69 |
+
# define CUB_HOST_COMPILER CUB_HOST_COMPILER_GCC
|
| 70 |
+
# define CUB_GCC_VERSION \
|
| 71 |
+
(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
|
| 72 |
+
#else
|
| 73 |
+
# define CUB_HOST_COMPILER CUB_HOST_COMPILER_UNKNOWN
|
| 74 |
+
#endif // CUB_HOST_COMPILER
|
| 75 |
+
|
| 76 |
+
// figure out which device compiler we're using
|
| 77 |
+
#if defined(__CUDACC__) || defined(_NVHPC_CUDA)
|
| 78 |
+
# define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_NVCC
|
| 79 |
+
#elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_MSVC
|
| 80 |
+
# define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_MSVC
|
| 81 |
+
#elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_GCC
|
| 82 |
+
# define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_GCC
|
| 83 |
+
#elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_CLANG
|
| 84 |
+
// CUDA-capable clang should behave similar to NVCC.
|
| 85 |
+
# if defined(__CUDA__)
|
| 86 |
+
# define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_NVCC
|
| 87 |
+
# else
|
| 88 |
+
# define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_CLANG
|
| 89 |
+
# endif
|
| 90 |
+
#else
|
| 91 |
+
# define CUB_DEVICE_COMPILER CUB_DEVICE_COMPILER_UNKNOWN
|
| 92 |
+
#endif
|
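A short sketch of how a consumer might branch on the detection macros this header defines; only names defined above are used, and the translation unit itself is hypothetical:

#include <cub/util_compiler.cuh>
#include <cstdio>

int main()
{
#if CUB_HOST_COMPILER == CUB_HOST_COMPILER_MSVC
    std::printf("host compiler: MSVC (_MSC_VER=%d)\n", CUB_MSVC_VERSION);
#elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_CLANG
    std::printf("host compiler: clang (%d)\n", CUB_CLANG_VERSION);  // major*10000 + minor*100 + patch
#elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_GCC
    std::printf("host compiler: gcc (%d)\n", CUB_GCC_VERSION);
#else
    std::printf("host compiler: unknown\n");
#endif
    std::printf("device compiler id: %d\n", CUB_DEVICE_COMPILER);
    return 0;
}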
miniCUDA124/include/cub/util_cpp_dialect.cuh
ADDED
@@ -0,0 +1,161 @@
/******************************************************************************
 * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

/*! \file
 *  \brief Detect the version of the C++ standard used by the compiler.
 */

#pragma once

#include <cuda/__cccl_config>

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

#include <cub/util_compiler.cuh>

// Deprecation warnings may be silenced by defining the following macros. These
// may be combined.
// - CUB_IGNORE_DEPRECATED_CPP_DIALECT:
//   Ignore all deprecated C++ dialects and outdated compilers.
// - CUB_IGNORE_DEPRECATED_CPP_11:
//   Ignore deprecation warnings when compiling with C++11. C++03 and outdated
//   compilers will still issue warnings.
// - CUB_IGNORE_DEPRECATED_COMPILER
//   Ignore deprecation warnings when using deprecated compilers. Compiling
//   with C++03 and C++11 will still issue warnings.

// Check for the thrust opt-outs as well:
#if !defined(CUB_IGNORE_DEPRECATED_CPP_DIALECT) && \
    defined(THRUST_IGNORE_DEPRECATED_CPP_DIALECT)
#  define CUB_IGNORE_DEPRECATED_CPP_DIALECT
#endif
#if !defined(CUB_IGNORE_DEPRECATED_CPP_11) && \
    defined(THRUST_IGNORE_DEPRECATED_CPP_11)
#  define CUB_IGNORE_DEPRECATED_CPP_11
#endif
#if !defined(CUB_IGNORE_DEPRECATED_COMPILER) && \
    defined(THRUST_IGNORE_DEPRECATED_COMPILER)
#  define CUB_IGNORE_DEPRECATED_COMPILER
#endif

#ifdef CUB_IGNORE_DEPRECATED_CPP_DIALECT
#  define CUB_IGNORE_DEPRECATED_CPP_11
#  define CUB_IGNORE_DEPRECATED_COMPILER
#endif

// Define this to override the built-in detection.
#ifndef CUB_CPP_DIALECT

// MSVC does not define __cplusplus correctly. _MSVC_LANG is used instead.
// This macro is only defined in MSVC 2015U3+.
#  ifdef _MSVC_LANG // Do not replace with CUB_HOST_COMPILER test (see above)
// MSVC2015 reports C++14 but lacks extended constexpr support. Treat as C++11.
#    if CUB_MSVC_VERSION < 1910 && _MSVC_LANG > 201103L /* MSVC < 2017 && CPP > 2011 */
#      define CUB_CPLUSPLUS 201103L /* Fix to 2011 */
#    else
#      define CUB_CPLUSPLUS _MSVC_LANG /* We'll trust this for now. */
#    endif // MSVC 2015 C++14 fix
#  else
#    define CUB_CPLUSPLUS __cplusplus
#  endif

// Detect current dialect:
#  if CUB_CPLUSPLUS < 201103L
#    define CUB_CPP_DIALECT 2003
#  elif CUB_CPLUSPLUS < 201402L
#    define CUB_CPP_DIALECT 2011
#  elif CUB_CPLUSPLUS < 201703L
#    define CUB_CPP_DIALECT 2014
#  elif CUB_CPLUSPLUS == 201703L
#    define CUB_CPP_DIALECT 2017
#  elif CUB_CPLUSPLUS > 201703L // unknown, but is higher than 2017.
#    define CUB_CPP_DIALECT 2020
#  endif

#  undef CUB_CPLUSPLUS // cleanup

#endif // !CUB_CPP_DIALECT

// Define CUB_COMPILER_DEPRECATION macro:
#if CUB_HOST_COMPILER == CUB_HOST_COMPILER_MSVC
#  define CUB_COMP_DEPR_IMPL(msg) \
    __pragma(message(__FILE__ ":" CUB_COMP_DEPR_IMPL0(__LINE__) ": warning: " #msg))
#  define CUB_COMP_DEPR_IMPL0(x) CUB_COMP_DEPR_IMPL1(x)
#  define CUB_COMP_DEPR_IMPL1(x) #x
#else // clang / gcc:
#  define CUB_COMP_DEPR_IMPL(msg) CUB_COMP_DEPR_IMPL0(GCC warning #msg)
#  define CUB_COMP_DEPR_IMPL0(expr) _Pragma(#expr)
#  define CUB_COMP_DEPR_IMPL1 /* intentionally blank */
#endif

#define CUB_COMPILER_DEPRECATION(REQ) \
  CUB_COMP_DEPR_IMPL(CUB requires at least REQ. Define CUB_IGNORE_DEPRECATED_COMPILER to suppress this message.)

#define CUB_COMPILER_DEPRECATION_SOFT(REQ, CUR) \
  CUB_COMP_DEPR_IMPL(CUB requires at least REQ. CUR is deprecated but still supported. CUR support will be removed in a future release. Define CUB_IGNORE_DEPRECATED_CPP_DIALECT to suppress this message.)

#ifndef CUB_IGNORE_DEPRECATED_COMPILER

// Compiler checks:
#  if CUB_HOST_COMPILER == CUB_HOST_COMPILER_GCC && CUB_GCC_VERSION < 50000
     CUB_COMPILER_DEPRECATION(GCC 5.0);
#  elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_CLANG && CUB_CLANG_VERSION < 70000
     CUB_COMPILER_DEPRECATION(Clang 7.0);
#  elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_MSVC && CUB_MSVC_VERSION < 1910
     // <2017. Hard upgrade message:
     CUB_COMPILER_DEPRECATION(MSVC 2019 (19.20/16.0/14.20));
#  elif CUB_HOST_COMPILER == CUB_HOST_COMPILER_MSVC && CUB_MSVC_VERSION < 1920
     // >=2017, <2019. Soft deprecation message:
     CUB_COMPILER_DEPRECATION_SOFT(MSVC 2019 (19.20/16.0/14.20), MSVC 2017);
#  endif

#endif // CUB_IGNORE_DEPRECATED_COMPILER

#ifndef CUB_IGNORE_DEPRECATED_DIALECT

// Dialect checks:
#  if CUB_CPP_DIALECT < 2011
     // <C++11. Hard upgrade message:
     CUB_COMPILER_DEPRECATION(C++14);
#  elif CUB_CPP_DIALECT == 2011 && !defined(CUB_IGNORE_DEPRECATED_CPP_11)
     // =C++11. Soft upgrade message:
     CUB_COMPILER_DEPRECATION_SOFT(C++14, C++11);
#  endif

#endif // CUB_IGNORE_DEPRECATED_DIALECT

#undef CUB_COMPILER_DEPRECATION_SOFT
#undef CUB_COMPILER_DEPRECATION
#undef CUB_COMP_DEPR_IMPL
#undef CUB_COMP_DEPR_IMPL0
#undef CUB_COMP_DEPR_IMPL1
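As the comments above describe, the opt-out macros must be defined before the header is first pulled in; CUB_CPP_DIALECT can then drive dialect-specific branches. A hedged sketch of intended use:

// Silence only the soft C++11 deprecation warning; C++03 and outdated
// compilers still warn. Must precede the first include of any CUB header.
#define CUB_IGNORE_DEPRECATED_CPP_11
#include <cub/util_cpp_dialect.cuh>

#if CUB_CPP_DIALECT >= 2017
// A C++17-or-later-only code path would go here.
#endif

int main() { return CUB_CPP_DIALECT >= 2014 ? 0 : 1; }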
miniCUDA124/include/cub/util_debug.cuh
ADDED
@@ -0,0 +1,329 @@
/******************************************************************************
 * Copyright (c) 2011, Duane Merrill. All rights reserved.
 * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

/**
 * \file
 * Error and event logging routines.
 *
 * The following macros definitions are supported:
 * - \p CUB_LOG. Simple event messages are printed to \p stdout.
 */

#pragma once

#include <cub/config.cuh>

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

#include <nv/target>

#include <cstdio>

CUB_NAMESPACE_BEGIN


#ifdef DOXYGEN_SHOULD_SKIP_THIS // Only parse this during doxygen passes:

/**
 * @def CUB_DEBUG_LOG
 *
 * Causes kernel launch configurations to be printed to the console
 */
#define CUB_DEBUG_LOG

/**
 * @def CUB_DEBUG_SYNC
 *
 * Causes synchronization of the stream after every kernel launch to check
 * for errors. Also causes kernel launch configurations to be printed to the
 * console.
 */
#define CUB_DEBUG_SYNC

/**
 * @def CUB_DEBUG_HOST_ASSERTIONS
 *
 * Extends `CUB_DEBUG_SYNC` effects by checking host-side precondition
 * assertions.
 */
#define CUB_DEBUG_HOST_ASSERTIONS

/**
 * @def CUB_DEBUG_DEVICE_ASSERTIONS
 *
 * Extends `CUB_DEBUG_HOST_ASSERTIONS` effects by checking device-side
 * precondition assertions.
 */
#define CUB_DEBUG_DEVICE_ASSERTIONS

/**
 * @def CUB_DEBUG_ALL
 *
 * Causes host and device-side precondition assertions to be checked. Apart
 * from that, causes synchronization of the stream after every kernel launch to
 * check for errors. Also causes kernel launch configurations to be printed to
 * the console.
 */
#define CUB_DEBUG_ALL

#endif // DOXYGEN_SHOULD_SKIP_THIS

/**
 * \addtogroup UtilMgmt
 * @{
 */


// `CUB_DETAIL_DEBUG_LEVEL_*`: Implementation details, internal use only:

#define CUB_DETAIL_DEBUG_LEVEL_NONE 0
#define CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS_ONLY 1
#define CUB_DETAIL_DEBUG_LEVEL_LOG 2
#define CUB_DETAIL_DEBUG_LEVEL_SYNC 3
#define CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS 4
#define CUB_DETAIL_DEBUG_LEVEL_DEVICE_ASSERTIONS 5
#define CUB_DETAIL_DEBUG_LEVEL_ALL 1000

// `CUB_DEBUG_*`: User interfaces:

// Extra logging, no syncs
#ifdef CUB_DEBUG_LOG
#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_LOG
#endif

// Logging + syncs
#ifdef CUB_DEBUG_SYNC
#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_SYNC
#endif

// Logging + syncs + host assertions
#ifdef CUB_DEBUG_HOST_ASSERTIONS
#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS
#endif

// Logging + syncs + host assertions + device assertions
#ifdef CUB_DEBUG_DEVICE_ASSERTIONS
#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_DEVICE_ASSERTIONS
#endif

// All
#ifdef CUB_DEBUG_ALL
#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_ALL
#endif

// Default case, no extra debugging:
#ifndef CUB_DETAIL_DEBUG_LEVEL
#ifdef NDEBUG
#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_NONE
#else
#define CUB_DETAIL_DEBUG_LEVEL CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS_ONLY
#endif
#endif

/*
 * `CUB_DETAIL_DEBUG_ENABLE_*`:
 * Internal implementation details, used for testing enabled debug features:
 */

#if CUB_DETAIL_DEBUG_LEVEL >= CUB_DETAIL_DEBUG_LEVEL_LOG
#define CUB_DETAIL_DEBUG_ENABLE_LOG
#endif

#if CUB_DETAIL_DEBUG_LEVEL >= CUB_DETAIL_DEBUG_LEVEL_SYNC
#define CUB_DETAIL_DEBUG_ENABLE_SYNC
#endif

#if (CUB_DETAIL_DEBUG_LEVEL >= CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS) || \
    (CUB_DETAIL_DEBUG_LEVEL == CUB_DETAIL_DEBUG_LEVEL_HOST_ASSERTIONS_ONLY)
#define CUB_DETAIL_DEBUG_ENABLE_HOST_ASSERTIONS
#endif

#if CUB_DETAIL_DEBUG_LEVEL >= CUB_DETAIL_DEBUG_LEVEL_DEVICE_ASSERTIONS
#define CUB_DETAIL_DEBUG_ENABLE_DEVICE_ASSERTIONS
#endif


/// CUB error reporting macro (prints error messages to stderr)
#if (defined(DEBUG) || defined(_DEBUG)) && !defined(CUB_STDERR)
#define CUB_STDERR
#endif

/**
 * \brief %If \p CUB_STDERR is defined and \p error is not \p cudaSuccess, the
 * corresponding error message is printed to \p stderr (or \p stdout in device
 * code) along with the supplied source context.
 *
 * \return The CUDA error.
 */
__host__ __device__
__forceinline__
cudaError_t Debug(cudaError_t error, const char *filename, int line)
{
  // Clear the global CUDA error state which may have been set by the last
  // call. Otherwise, errors may "leak" to unrelated kernel launches.

  // clang-format off
  #ifndef CUB_RDC_ENABLED
  #define CUB_TEMP_DEVICE_CODE
  #else
  #define CUB_TEMP_DEVICE_CODE last_error = cudaGetLastError()
  #endif

  cudaError_t last_error = cudaSuccess;

  NV_IF_TARGET(
    NV_IS_HOST,
    (last_error = cudaGetLastError();),
    (CUB_TEMP_DEVICE_CODE;)
  );

  #undef CUB_TEMP_DEVICE_CODE
  // clang-format on

  if (error == cudaSuccess && last_error != cudaSuccess)
  {
    error = last_error;
  }

#ifdef CUB_STDERR
  if (error)
  {
    NV_IF_TARGET(
      NV_IS_HOST, (
        fprintf(stderr,
                "CUDA error %d [%s, %d]: %s\n",
                error,
                filename,
                line,
                cudaGetErrorString(error));
        fflush(stderr);
      ),
      (
        printf("CUDA error %d [block (%d,%d,%d) thread (%d,%d,%d), %s, %d]\n",
               error,
               blockIdx.z,
               blockIdx.y,
               blockIdx.x,
               threadIdx.z,
               threadIdx.y,
               threadIdx.x,
               filename,
               line);
      )
    );
  }
#else
  (void)filename;
  (void)line;
#endif

  return error;
}

/**
 * \brief Debug macro
 */
#ifndef CubDebug
#define CubDebug(e) CUB_NS_QUALIFIER::Debug((cudaError_t) (e), __FILE__, __LINE__)
#endif


/**
 * \brief Debug macro with exit
 */
#ifndef CubDebugExit
#define CubDebugExit(e) if (CUB_NS_QUALIFIER::Debug((cudaError_t) (e), __FILE__, __LINE__)) { exit(1); }
#endif


/**
 * \brief Log macro for printf statements.
 */
#if !defined(_CubLog)
#if defined(_NVHPC_CUDA) || !(defined(__clang__) && defined(__CUDA__))

// NVCC / NVC++
#define _CubLog(format, ...)                                                   \
  do                                                                           \
  {                                                                            \
    NV_IF_TARGET(NV_IS_HOST,                                                   \
                 (printf(format, __VA_ARGS__);),                               \
                 (printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format,     \
                         blockIdx.z,                                           \
                         blockIdx.y,                                           \
                         blockIdx.x,                                           \
                         threadIdx.z,                                          \
                         threadIdx.y,                                          \
                         threadIdx.x,                                          \
                         __VA_ARGS__);));                                      \
  } while (false)

#else // Clang:

// XXX shameless hack for clang around variadic printf...
//     Compiles w/o supplying -std=c++11 but shows warnings,
//     so we silence them :)
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wunnamed-type-template-args"
template <class... Args>
inline __host__ __device__ void va_printf(char const *format,
                                          Args const &...args)
{
#ifdef __CUDA_ARCH__
  printf(format,
         blockIdx.z,
         blockIdx.y,
         blockIdx.x,
         threadIdx.z,
         threadIdx.y,
         threadIdx.x,
         args...);
#else
  printf(format, args...);
#endif
}
#ifndef __CUDA_ARCH__
#define _CubLog(format, ...) CUB_NS_QUALIFIER::va_printf(format, __VA_ARGS__);
#else
#define _CubLog(format, ...)                                                   \
  CUB_NS_QUALIFIER::va_printf("[block (%d,%d,%d), thread "                     \
                              "(%d,%d,%d)]: " format,                          \
                              __VA_ARGS__);
#endif
#endif
#endif

/** @} */ // end group UtilMgmt

CUB_NAMESPACE_END
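Typical host-side use of the macros above: wrap each runtime call in CubDebug so a failure is reported (when CUB_STDERR is defined) while the error code still flows back to the caller. The deliberately oversized allocation below is a hypothetical way to provoke an error:

#define CUB_STDERR // opt in to stderr reporting before including the header
#include <cub/util_debug.cuh>
#include <cuda_runtime.h>
#include <cstddef>

int main()
{
    void *d_ptr = nullptr;
    // Expected to fail on any real GPU; Debug() prints the message for us.
    cudaError_t status = CubDebug(cudaMalloc(&d_ptr, ~std::size_t(0)));
    if (status != cudaSuccess)
        return 1;
    CubDebugExit(cudaFree(d_ptr)); // exits the process if the call fails
    return 0;
}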
miniCUDA124/include/cuda/__cccl_config
ADDED
@@ -0,0 +1,16 @@
//===----------------------------------------------------------------------===//
//
// Part of libcu++, the C++ Standard Library for your entire system,
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

#ifndef _CUDA__CCCL_CONFIG
#define _CUDA__CCCL_CONFIG

#include "std/detail/libcxx/include/__cccl_config"

#endif // _CUDA__CCCL_CONFIG
miniCUDA124/include/cuda/atomic
ADDED
@@ -0,0 +1,16 @@
//===----------------------------------------------------------------------===//
//
// Part of libcu++, the C++ Standard Library for your entire system,
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

#ifndef _CUDA_ATOMIC
#define _CUDA_ATOMIC

#include "std/atomic"

#endif // _CUDA_ATOMIC
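This umbrella header exposes cuda::atomic with explicit thread scopes. A minimal device-side sketch (the kernel name and its launch are hypothetical):

#include <cuda/atomic>

__global__ void count_visitors(cuda::atomic<int, cuda::thread_scope_device> *counter)
{
    // Relaxed ordering is enough for a pure counter: nothing else is
    // synchronized through this atomic.
    counter->fetch_add(1, cuda::std::memory_order_relaxed);
}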
miniCUDA124/include/cuda/barrier
ADDED
@@ -0,0 +1,285 @@
//===----------------------------------------------------------------------===//
//
// Part of libcu++, the C++ Standard Library for your entire system,
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

#ifndef _CUDA_BARRIER
#define _CUDA_BARRIER

#include "std/barrier"

// Forward-declare CUtensorMap for use in cp_async_bulk_tensor_* PTX wrapping
// functions. These functions take a pointer to CUtensorMap, so do not need to
// know its size. This type is defined in cuda.h (driver API) as:
//
//     typedef struct CUtensorMap_st { [ .. snip .. ] } CUtensorMap;
//
// We need to forward-declare both CUtensorMap_st (the struct) and CUtensorMap
// (the typedef):
struct CUtensorMap_st;
typedef struct CUtensorMap_st CUtensorMap;

_LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE_EXPERIMENTAL

// Experimental exposure of TMA PTX:
//
// - cp_async_bulk_global_to_shared
// - cp_async_bulk_shared_to_global
// - cp_async_bulk_tensor_{1,2,3,4,5}d_global_to_shared
// - cp_async_bulk_tensor_{1,2,3,4,5}d_shared_to_global
// - fence_proxy_async_shared_cta
// - cp_async_bulk_commit_group
// - cp_async_bulk_wait_group_read<0, …, 7>

// These PTX wrappers are only available when the code is compiled for compute
// capability 9.0 and above. The check for (!defined(__CUDA_MINIMUM_ARCH__)) is
// necessary to prevent cudafe from ripping out the device functions before
// device compilation begins.
#ifdef __cccl_lib_experimental_ctk12_cp_async_exposure

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_global_to_shared(void *__dest, const void *__src, _CUDA_VSTD::uint32_t __size, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
{
    _LIBCUDACXX_DEBUG_ASSERT(__size % 16 == 0, "Size must be multiple of 16.");
    _LIBCUDACXX_DEBUG_ASSERT(__isShared(__dest), "Destination must be shared memory address.");
    _LIBCUDACXX_DEBUG_ASSERT(__isGlobal(__src), "Source must be global memory address.");

    asm volatile(
        "cp.async.bulk.shared::cluster.global.mbarrier::complete_tx::bytes [%0], [%1], %2, [%3];\n"
        :
        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
          "l"(static_cast<_CUDA_VSTD::uint64_t>(__cvta_generic_to_global(__src))),
          "r"(__size),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
        : "memory");
}


// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_shared_to_global(void *__dest, const void * __src, _CUDA_VSTD::uint32_t __size)
{
    _LIBCUDACXX_DEBUG_ASSERT(__size % 16 == 0, "Size must be multiple of 16.");
    _LIBCUDACXX_DEBUG_ASSERT(__isGlobal(__dest), "Destination must be global memory address.");
    _LIBCUDACXX_DEBUG_ASSERT(__isShared(__src), "Source must be shared memory address.");

    asm volatile(
        "cp.async.bulk.global.shared::cta.bulk_group [%0], [%1], %2;\n"
        :
        : "l"(static_cast<_CUDA_VSTD::uint64_t>(__cvta_generic_to_global(__dest))),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src))),
          "r"(__size)
        : "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_tensor_1d_global_to_shared(
    void *__dest, const CUtensorMap *__tensor_map , int __c0, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
{
    asm volatile(
        "cp.async.bulk.tensor.1d.shared::cluster.global.tile.mbarrier::complete_tx::bytes "
        "[%0], [%1, {%2}], [%3];\n"
        :
        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
          "l"(__tensor_map),
          "r"(__c0),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
        : "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_tensor_2d_global_to_shared(
    void *__dest, const CUtensorMap *__tensor_map , int __c0, int __c1, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
{
    asm volatile(
        "cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes "
        "[%0], [%1, {%2, %3}], [%4];\n"
        :
        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
          "l"(__tensor_map),
          "r"(__c0),
          "r"(__c1),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
        : "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_tensor_3d_global_to_shared(
    void *__dest, const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
{
    asm volatile(
        "cp.async.bulk.tensor.3d.shared::cluster.global.tile.mbarrier::complete_tx::bytes "
        "[%0], [%1, {%2, %3, %4}], [%5];\n"
        :
        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
          "l"(__tensor_map),
          "r"(__c0),
          "r"(__c1),
          "r"(__c2),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
        : "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_tensor_4d_global_to_shared(
    void *__dest, const CUtensorMap *__tensor_map , int __c0, int __c1, int __c2, int __c3, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
{
    asm volatile(
        "cp.async.bulk.tensor.4d.shared::cluster.global.tile.mbarrier::complete_tx::bytes "
        "[%0], [%1, {%2, %3, %4, %5}], [%6];\n"
        :
        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
          "l"(__tensor_map),
          "r"(__c0),
          "r"(__c1),
          "r"(__c2),
          "r"(__c3),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
        : "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_tensor_5d_global_to_shared(
    void *__dest, const CUtensorMap *__tensor_map , int __c0, int __c1, int __c2, int __c3, int __c4, ::cuda::barrier<::cuda::thread_scope_block> &__bar)
{
    asm volatile(
        "cp.async.bulk.tensor.5d.shared::cluster.global.tile.mbarrier::complete_tx::bytes "
        "[%0], [%1, {%2, %3, %4, %5, %6}], [%7];\n"
        :
        : "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__dest))),
          "l"(__tensor_map),
          "r"(__c0),
          "r"(__c1),
          "r"(__c2),
          "r"(__c3),
          "r"(__c4),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(::cuda::device::barrier_native_handle(__bar))))
        : "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_tensor_1d_shared_to_global(
    const CUtensorMap *__tensor_map, int __c0, const void *__src)
{
    asm volatile(
        "cp.async.bulk.tensor.1d.global.shared::cta.tile.bulk_group "
        "[%0, {%1}], [%2];\n"
        :
        : "l"(__tensor_map),
          "r"(__c0),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src)))
        : "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_tensor_2d_shared_to_global(
    const CUtensorMap *__tensor_map, int __c0, int __c1, const void *__src)
{
    asm volatile(
        "cp.async.bulk.tensor.2d.global.shared::cta.tile.bulk_group "
        "[%0, {%1, %2}], [%3];\n"
        :
        : "l"(__tensor_map),
          "r"(__c0),
          "r"(__c1),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src)))
        : "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_tensor_3d_shared_to_global(
    const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, const void *__src)
{
    asm volatile(
        "cp.async.bulk.tensor.3d.global.shared::cta.tile.bulk_group "
        "[%0, {%1, %2, %3}], [%4];\n"
        :
        : "l"(__tensor_map),
          "r"(__c0),
          "r"(__c1),
          "r"(__c2),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src)))
        : "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_tensor_4d_shared_to_global(
    const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, int __c3, const void *__src)
{
    asm volatile(
        "cp.async.bulk.tensor.4d.global.shared::cta.tile.bulk_group "
        "[%0, {%1, %2, %3, %4}], [%5];\n"
        :
        : "l"(__tensor_map),
          "r"(__c0),
          "r"(__c1),
          "r"(__c2),
          "r"(__c3),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src)))
        : "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-tensor
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_tensor_5d_shared_to_global(
    const CUtensorMap *__tensor_map, int __c0, int __c1, int __c2, int __c3, int __c4, const void *__src)
{
    asm volatile(
        "cp.async.bulk.tensor.5d.global.shared::cta.tile.bulk_group "
        "[%0, {%1, %2, %3, %4, %5}], [%6];\n"
        :
        : "l"(__tensor_map),
          "r"(__c0),
          "r"(__c1),
          "r"(__c2),
          "r"(__c3),
          "r"(__c4),
          "r"(static_cast<_CUDA_VSTD::uint32_t>(__cvta_generic_to_shared(__src)))
        : "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#parallel-synchronization-and-communication-instructions-membar
inline _LIBCUDACXX_DEVICE
void fence_proxy_async_shared_cta() {
    asm volatile("fence.proxy.async.shared::cta; \n":::"memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-commit-group
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_commit_group()
{
    asm volatile("cp.async.bulk.commit_group;\n" ::: "memory");
}

// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cp-async-bulk-wait-group
template <int n_prior>
inline _LIBCUDACXX_DEVICE
void cp_async_bulk_wait_group_read()
{
  static_assert(n_prior <= 63, "cp_async_bulk_wait_group_read: waiting for more than 63 groups is not supported.");
  asm volatile("cp.async.bulk.wait_group.read %0; \n"
               :
               : "n"(n_prior)
               : "memory");
}

#endif // __cccl_lib_experimental_ctk12_cp_async_exposure

_LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE_EXPERIMENTAL

#endif // _CUDA_BARRIER
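A hedged sketch of driving the experimental bulk-copy wrappers above, closely following the pattern in the CUDA programming guide. It assumes sm_90+, a toolchain where __cccl_lib_experimental_ctk12_cp_async_exposure is defined, and that cuda::device::barrier_arrive_tx (libcu++'s arrive-with-transaction-count API, not shown in this listing) is available:

#include <cuda/barrier>

using block_barrier = cuda::barrier<cuda::thread_scope_block>;
namespace cde = cuda::device::experimental;

__global__ void load_tile(const int *gmem_src)
{
    __shared__ alignas(16) int smem[256]; // 1024 bytes, a multiple of 16
    // Suppress the warning about dynamic initialization of a __shared__ object.
    #pragma nv_diag_suppress static_var_with_dynamic_init
    __shared__ block_barrier bar;

    if (threadIdx.x == 0)
    {
        init(&bar, blockDim.x);              // one arrival per thread
        cde::fence_proxy_async_shared_cta(); // make init visible to the async proxy
    }
    __syncthreads();

    block_barrier::arrival_token token;
    if (threadIdx.x == 0)
    {
        // One thread issues the bulk copy, then arrives with the byte count.
        cde::cp_async_bulk_global_to_shared(smem, gmem_src, sizeof(smem), bar);
        token = cuda::device::barrier_arrive_tx(bar, 1, sizeof(smem));
    }
    else
    {
        token = bar.arrive();
    }
    bar.wait(cuda::std::move(token));
    // smem now holds the tile copied by the TMA engine.
}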
miniCUDA124/include/cuda/functional
ADDED
@@ -0,0 +1,155 @@
// -*- C++ -*-
/*
 * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * NVIDIA SOFTWARE LICENSE
 *
 * This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”).
 *
 * This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users.
 *
 * You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions.
 *
 * 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license.
 *
 * 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant:
 *    a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights.
 *    b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE.
 *
 * 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows:
 *    a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs.
 *    b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE.
 *    c. You may not modify or create derivative works of any portion of the SOFTWARE.
 *    d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE.
 *    e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge.
 *    f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses.
 *    g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms.
 *
 * 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems.
 *
 * 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE.
 *
 * 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict.
 *
 * 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice.
 *
 * 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED.
 *
 * 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT.
 *
 * 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you.
 *
 * 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction.
 *
 * 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect.
 *
 * 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE.
 *
 * 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051.
 *
 * 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party.
 *
 * (v. August 20, 2021)
 */

#ifndef _CUDA_FUNCTIONAL_
#define _CUDA_FUNCTIONAL_

#include <cuda/std/type_traits>
#include <cuda/std/functional>
#include <cuda/std/utility>

_LIBCUDACXX_BEGIN_NAMESPACE_CUDA
namespace __detail
{

template <class _Ret, class _DecayFn>
class __return_type_wrapper {
 private:
  _DecayFn __fn_;

 public:
  __return_type_wrapper() = delete;

  template <class _Fn,
            class = _CUDA_VSTD::__enable_if_t<_CUDA_VSTD::is_same<_CUDA_VSTD::__decay_t<_Fn>, _DecayFn>::value>>
  _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
  explicit __return_type_wrapper(_Fn &&__fn) noexcept
      : __fn_(_CUDA_VSTD::forward<_Fn>(__fn)) {}

  template <class... _As>
  _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
  _Ret operator()(_As&&... __as) & noexcept {
#if !defined(__NVCC__) || defined(__CUDA_ARCH__)
    static_assert(
        _CUDA_VSTD::is_same<
            _Ret,
            typename _CUDA_VSTD::__invoke_of<_DecayFn&, _As...>::type
        >::value,
        "Return type shall match the proclaimed one exactly");
#endif

    return _CUDA_VSTD::__invoke(__fn_, _CUDA_VSTD::forward<_As>(__as)...);
  }

  template <class... _As>
  _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
  _Ret operator()(_As&&... __as) && noexcept {
#if !defined(__NVCC__) || defined(__CUDA_ARCH__)
    static_assert(
        _CUDA_VSTD::is_same<
            _Ret,
            typename _CUDA_VSTD::__invoke_of<_DecayFn, _As...>::type
        >::value,
        "Return type shall match the proclaimed one exactly");
#endif

    return _CUDA_VSTD::__invoke(_CUDA_VSTD::move(__fn_),
                                _CUDA_VSTD::forward<_As>(__as)...);
  }

  template <class... _As>
  _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
  _Ret operator()(_As&&... __as) const& noexcept {
#if !defined(__NVCC__) || defined(__CUDA_ARCH__)
    static_assert(
        _CUDA_VSTD::is_same<
            _Ret,
            typename _CUDA_VSTD::__invoke_of<const _DecayFn&, _As...>::type
        >::value,
        "Return type shall match the proclaimed one exactly");
#endif

    return _CUDA_VSTD::__invoke(__fn_, _CUDA_VSTD::forward<_As>(__as)...);
  }

  template <class... _As>
  _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
  _Ret operator()(_As&&... __as) const&& noexcept {
#if !defined(__NVCC__) || defined(__CUDA_ARCH__)
    static_assert(
        _CUDA_VSTD::is_same<
            _Ret,
            typename _CUDA_VSTD::__invoke_of<const _DecayFn, _As...>::type
        >::value,
        "Return type shall match the proclaimed one exactly");
#endif

    return _CUDA_VSTD::__invoke(_CUDA_VSTD::move(__fn_),
                                _CUDA_VSTD::forward<_As>(__as)...);
  }
};

} // __detail

template <class _Ret, class _Fn>
inline _LIBCUDACXX_INLINE_VISIBILITY
|
| 147 |
+
__detail::__return_type_wrapper<_Ret, _CUDA_VSTD::__decay_t<_Fn>>
|
| 148 |
+
proclaim_return_type(_Fn&& __fn) noexcept {
|
| 149 |
+
return __detail::__return_type_wrapper<_Ret, _CUDA_VSTD::__decay_t<_Fn>>(
|
| 150 |
+
_CUDA_VSTD::forward<_Fn>(__fn));
|
| 151 |
+
}
|
| 152 |
+
_LIBCUDACXX_END_NAMESPACE_CUDA
|
| 153 |
+
|
| 154 |
+
#endif // _CUDA_FUNCTIONAL_
|
| 155 |
+
|
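The wrapper above exists because, on the host side of an nvcc compilation, the return type of an extended __device__ lambda cannot be queried; proclaim_return_type<_Ret> records the type in the wrapper, and the static_asserts re-verify it wherever the wrapper is actually invoked on device. A minimal usage sketch follows; it is not part of the header, the kernel and names are illustrative, and it assumes compilation with nvcc --extended-lambda:

// proclaim_example.cu -- hypothetical sketch of cuda::proclaim_return_type.
#include <cuda/functional>
#include <cstdio>

template <class Fn>
__global__ void apply_kernel(Fn fn, int x)
{
    // The wrapper's operator() statically checks that the callable really
    // returns the proclaimed type, then forwards to it.
    printf("f(%d) = %d\n", x, fn(x));
}

int main()
{
    // Host code cannot see the lambda's return type, so we proclaim it.
    auto doubled = cuda::proclaim_return_type<int>(
        [] __device__(int x) { return 2 * x; });
    apply_kernel<<<1, 1>>>(doubled, 21);
    cudaDeviceSynchronize();
    return 0;
}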
miniCUDA124/include/cuda/latch
ADDED
@@ -0,0 +1,16 @@
//===----------------------------------------------------------------------===//
//
// Part of libcu++, the C++ Standard Library for your entire system,
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

#ifndef _CUDA_LATCH
#define _CUDA_LATCH

#include "std/latch"

#endif // _CUDA_LATCH
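This header is a thin re-export: the cuda::latch<Scope> primitive itself lives in the bundled "std/latch" implementation, parameterized by thread scope. A minimal block-scoped sketch, assuming the usual std::latch-style interface (arrive_and_wait) and placement-new initialization for shared-memory objects; the kernel and names are illustrative:

// latch_example.cu -- hypothetical sketch of a block-scoped cuda::latch.
#include <cuda/latch>
#include <new> // placement new

__global__ void gated_kernel()
{
    // Shared-memory variables are not constructed automatically, so one
    // thread builds the latch with a count equal to the block size.
    __shared__ cuda::latch<cuda::thread_scope_block> gate;
    if (threadIdx.x == 0) {
        new (&gate) cuda::latch<cuda::thread_scope_block>(blockDim.x);
    }
    __syncthreads();

    // ... per-thread setup work goes here ...

    gate.arrive_and_wait(); // count down once, then block until the count is zero
}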
miniCUDA124/include/cuda/memory_resource
ADDED
@@ -0,0 +1,632 @@
//===----------------------------------------------------------------------===//
//
// Part of the CUDA Toolkit, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

#ifndef _CUDA_MEMORY_RESOURCE
#define _CUDA_MEMORY_RESOURCE

// clang-format off
/*
memory_resource synopsis
namespace cuda {
namespace mr {
template <class Resource>
concept resource = equality_comparable<Resource>
  && requires(Resource& res, void* ptr, size_t size, size_t alignment) {
    { res.allocate(size, alignment) } -> same_as<void*>;
    { res.deallocate(ptr, size, alignment) } -> same_as<void>;
  };

template <class Resource>
concept async_resource = resource<Resource>
  && requires(Resource& res, void* ptr, size_t size, size_t alignment, cuda_stream_ref stream) {
    { res.allocate_async(size, alignment, stream) } -> same_as<void*>;
    { res.deallocate_async(ptr, size, alignment, stream) } -> same_as<void>;
  };

template <class Resource, class Property>
concept has_property = resource<Resource> && requires(const Resource& res, Property prop) {
  get_property(res, prop);
};

template <class Property>
concept property_with_value = requires {
  typename Property::value_type;
};

template <class Resource, class Property, class Return>
concept has_property_with = resource<Resource>
  && property_with_value<Property>
  && same_as<Return, typename Property::value_type>
  && requires(const Resource& res, Property prop) {
    get_property(res, prop) -> Return;
  };

template <class Resource, class... Properties>
concept resource_with = resource<Resource> && (has_property<Resource, Properties> && ...);

template <class Resource, class... Properties>
concept async_resource_with = async_resource<Resource> && (has_property<Resource, Properties> && ...);

template <class... Properties>
class resource_ref {
  template <resource_with<Properties...> Resource>
  resource_ref(Resource&) noexcept;

  void* allocate(size_t size, size_t alignment);
  void deallocate(void* ptr, size_t size, size_t alignment);

  template <class... OtherProperties>
    requires resource_with<resource_ref, OtherProperties...>
          && resource_with<resource_ref<OtherProperties...>, Properties...>
  friend bool operator==(const resource_ref& left, const resource_ref<OtherProperties...>& right);

  template <property_with_value Property>
    requires has_property<resource_ref, Property>
  friend typename Property::value_type get_property(const resource_ref& ref, Property) noexcept;

  template <class Property>
    requires (has_property<resource_ref, Property> && !property_with_value<Property>)
  friend void get_property(const resource_ref& ref, Property) noexcept;
};

} // mr
} // cuda
*/
// clang-format on

#ifdef LIBCUDACXX_ENABLE_EXPERIMENTAL_MEMORY_RESOURCE

#include <cuda/stream_ref>

#include <cuda/std/concepts>
#include <cuda/std/type_traits>

#include <cuda/std/detail/__config>

#include <cuda/std/detail/__pragma_push>

#if _LIBCUDACXX_STD_VER > 11
_LIBCUDACXX_BEGIN_NAMESPACE_CUDA

///////////////////////////////////////////////////////////////////////////////
// properties

/// \concept has_property
/// \brief The \c has_property concept
template <class _Resource, class _Property>
_LIBCUDACXX_CONCEPT_FRAGMENT(
  __has_property_,
  requires(const _Resource& __res)(
    get_property(__res, _Property{})
  ));
template <class _Resource, class _Property>
_LIBCUDACXX_CONCEPT has_property = _LIBCUDACXX_FRAGMENT(__has_property_, _Resource, _Property);

/// \concept property_with_value
/// \brief The \c property_with_value concept
template <class _Property>
using __property_value_t = typename _Property::value_type;

template <class _Property>
_LIBCUDACXX_CONCEPT_FRAGMENT(
  __property_with_value_,
  requires()(
    typename(__property_value_t<_Property>)
  ));
template <class _Property>
_LIBCUDACXX_CONCEPT property_with_value = _LIBCUDACXX_FRAGMENT(__property_with_value_, _Property);

/// \concept has_property_with
/// \brief The \c has_property_with concept
template <class _Resource, class _Property, class _Return>
_LIBCUDACXX_CONCEPT_FRAGMENT(
  __has_property_with_,
  requires(const _Resource& __res)(
    requires(property_with_value<_Property>),
    requires(_CUDA_VSTD::same_as<_Return, decltype(get_property(__res, _Property{}))>)
  ));
template <class _Resource, class _Property, class _Return>
_LIBCUDACXX_CONCEPT has_property_with = _LIBCUDACXX_FRAGMENT(__has_property_with_, _Resource, _Property, _Return);

/// \concept __has_upstream_resource
/// \brief The \c __has_upstream_resource concept
template <class _Resource, class _Upstream>
_LIBCUDACXX_CONCEPT_FRAGMENT(
  __has_upstream_resource_,
  requires(const _Resource& __res)(
    requires(_CUDA_VSTD::same_as<_CUDA_VSTD::__remove_const_ref_t<decltype(__res.upstream_resource())>, _Upstream>)
  ));
template <class _Resource, class _Upstream>
_LIBCUDACXX_CONCEPT __has_upstream_resource = _LIBCUDACXX_FRAGMENT(__has_upstream_resource_, _Resource, _Upstream);

/// class forward_property
/// \brief The \c forward_property crtp template simplifies the user facing side of forwarding properties
/// We can just derive from it to properly forward all properties
_LIBCUDACXX_BEGIN_NAMESPACE_CPO(__forward_property)
template <class _Derived, class _Upstream>
struct __fn {
  _LIBCUDACXX_DISABLE_EXEC_CHECK
  _LIBCUDACXX_TEMPLATE(class _Property)
    _LIBCUDACXX_REQUIRES( (!property_with_value<_Property>) _LIBCUDACXX_AND has_property<_Upstream, _Property>)
  _LIBCUDACXX_INLINE_VISIBILITY friend constexpr void get_property(const _Derived&, _Property) noexcept {}

  // The indirection is needed, otherwise the compiler might believe that _Derived is an incomplete type
  _LIBCUDACXX_DISABLE_EXEC_CHECK
  _LIBCUDACXX_TEMPLATE(class _Property, class _Derived2 = _Derived)
    _LIBCUDACXX_REQUIRES( property_with_value<_Property> _LIBCUDACXX_AND has_property<_Upstream, _Property> _LIBCUDACXX_AND
      __has_upstream_resource<_Derived2, _Upstream>)
  _LIBCUDACXX_INLINE_VISIBILITY friend constexpr __property_value_t<_Property> get_property(
    const _Derived& __res, _Property __prop) {
    return get_property(__res.upstream_resource(), __prop);
  }
};
_LIBCUDACXX_END_NAMESPACE_CPO

template <class _Derived, class _Upstream>
using forward_property = __forward_property::__fn<_Derived, _Upstream>;

/// class get_property
/// \brief The \c get_property customization point object queries a resource for a property;
/// stateless properties yield \c void, while properties with a value return the property's \c value_type
_LIBCUDACXX_BEGIN_NAMESPACE_CPO(__get_property)
struct __fn {
  _LIBCUDACXX_DISABLE_EXEC_CHECK
  _LIBCUDACXX_TEMPLATE(class _Upstream, class _Property)
    _LIBCUDACXX_REQUIRES( (!property_with_value<_Property>) _LIBCUDACXX_AND has_property<_Upstream, _Property>)
  _LIBCUDACXX_INLINE_VISIBILITY constexpr void operator()(const _Upstream&, _Property) const noexcept {}

  _LIBCUDACXX_DISABLE_EXEC_CHECK
  _LIBCUDACXX_TEMPLATE(class _Upstream, class _Property)
    _LIBCUDACXX_REQUIRES( (property_with_value<_Property>) _LIBCUDACXX_AND has_property<_Upstream, _Property>)
  _LIBCUDACXX_INLINE_VISIBILITY constexpr __property_value_t<_Property> operator()(
    const _Upstream& __res, _Property __prop) const {
    return get_property(__res, __prop);
  }
};
_LIBCUDACXX_END_NAMESPACE_CPO

inline namespace __cpo {
_LIBCUDACXX_CPO_ACCESSIBILITY auto get_property = __get_property::__fn{};
} // namespace __cpo

namespace mr
{

///////////////////////////////////////////////////////////////////////////////
// memory_resource

/// \concept resource
/// \brief The \c resource concept
template <class _Resource>
_LIBCUDACXX_CONCEPT_FRAGMENT(
  __resource_,
  requires(_Resource& __res, void* __ptr, size_t __bytes, size_t __alignment) (
    requires(_CUDA_VSTD::same_as<void*, decltype(__res.allocate(__bytes, __alignment))>),
    requires(_CUDA_VSTD::same_as<void, decltype(__res.deallocate(__ptr, __bytes, __alignment))>),
    requires(_CUDA_VSTD::equality_comparable<_Resource>)
  ));

template <class _Resource>
_LIBCUDACXX_CONCEPT resource = _LIBCUDACXX_FRAGMENT(__resource_, _Resource);

/// \concept async_resource
/// \brief The \c async_resource concept
template <class _Resource>
_LIBCUDACXX_CONCEPT_FRAGMENT(
  __async_resource_,
  requires(_Resource& __res, void* __ptr, size_t __bytes, size_t __alignment, cuda::stream_ref __stream) (
    requires(resource<_Resource>),
    requires(_CUDA_VSTD::same_as<void*, decltype(__res.allocate_async(__bytes, __alignment, __stream))>),
    requires(_CUDA_VSTD::same_as<void, decltype(__res.deallocate_async(__ptr, __bytes, __alignment, __stream))>),
    requires(_CUDA_VSTD::equality_comparable<_Resource>)
  ));

template <class _Resource>
_LIBCUDACXX_CONCEPT async_resource = _LIBCUDACXX_FRAGMENT(__async_resource_, _Resource);

/// \concept resource_with
/// \brief The \c resource_with concept
template <class _Resource, class... _Properties>
#if _LIBCUDACXX_STD_VER < 17
_LIBCUDACXX_CONCEPT resource_with =
  resource<_Resource> && _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<has_property<_Resource, _Properties>>...>;
#else
_LIBCUDACXX_CONCEPT resource_with = resource<_Resource> && (has_property<_Resource, _Properties> && ...);
#endif

/// \concept async_resource_with
/// \brief The \c async_resource_with concept
template <class _Resource, class... _Properties>
#if _LIBCUDACXX_STD_VER < 17
_LIBCUDACXX_CONCEPT async_resource_with = async_resource<_Resource> &&
  _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<has_property<_Resource, _Properties>>...>;
#else
_LIBCUDACXX_CONCEPT async_resource_with = async_resource<_Resource> &&
  (has_property<_Resource, _Properties> && ...);
#endif

///////////////////////////////////////////////////////////////////////////////
/// class resource_ref
/// class async_resource_ref
enum class _AllocType
{
  _Default,
  _Async,
};

struct _Alloc_vtable
{
  using _AllocFn = void* (*)(void*, size_t, size_t);
  using _DeallocFn = void (*)(void*, void*, size_t, size_t);
  using _EqualFn = bool (*)(void*, void*);

  _AllocFn __alloc_fn;
  _DeallocFn __dealloc_fn;
  _EqualFn __equal_fn;

  constexpr _Alloc_vtable(_AllocFn __alloc_fn_, _DeallocFn __dealloc_fn_, _EqualFn __equal_fn_) noexcept
      : __alloc_fn(__alloc_fn_)
      , __dealloc_fn(__dealloc_fn_)
      , __equal_fn(__equal_fn_)
  {}
};

struct _Async_alloc_vtable : public _Alloc_vtable
{
  using _AsyncAllocFn = void* (*)(void*, size_t, size_t, cuda::stream_ref);
  using _AsyncDeallocFn = void (*)(void*, void*, size_t, size_t, cuda::stream_ref);

  _AsyncAllocFn __async_alloc_fn;
  _AsyncDeallocFn __async_dealloc_fn;

  constexpr _Async_alloc_vtable(_Alloc_vtable::_AllocFn __alloc_fn_,
                                _Alloc_vtable::_DeallocFn __dealloc_fn_,
                                _Alloc_vtable::_EqualFn __equal_fn_,
                                _AsyncAllocFn __async_alloc_fn_,
                                _AsyncDeallocFn __async_dealloc_fn_) noexcept
      : _Alloc_vtable(__alloc_fn_, __dealloc_fn_, __equal_fn_)
      , __async_alloc_fn(__async_alloc_fn_)
      , __async_dealloc_fn(__async_dealloc_fn_)
  {}
};

// clang-format off
struct _Resource_vtable_builder
{
  template <class _Resource, class _Property>
  static __property_value_t<_Property> _Get_property(void* __res) noexcept {
    return get_property(*static_cast<const _Resource *>(__res), _Property{});
  }

  template <class _Resource>
  static void* _Alloc(void* __object, size_t __bytes, size_t __alignment) {
    return static_cast<_Resource *>(__object)->allocate(__bytes, __alignment);
  }

  template <class _Resource>
  static void _Dealloc(void* __object, void* __ptr, size_t __bytes, size_t __alignment) {
    return static_cast<_Resource *>(__object)->deallocate(__ptr, __bytes, __alignment);
  }

  template <class _Resource>
  static void* _Alloc_async(void* __object, size_t __bytes, size_t __alignment, cuda::stream_ref __stream) {
    return static_cast<_Resource *>(__object)->allocate_async(__bytes, __alignment, __stream);
  }

  template <class _Resource>
  static void _Dealloc_async(void* __object, void* __ptr, size_t __bytes, size_t __alignment, cuda::stream_ref __stream) {
    return static_cast<_Resource *>(__object)->deallocate_async(__ptr, __bytes, __alignment, __stream);
  }

  template <class _Resource>
  static bool _Equal(void* __left, void* __right) {
    return *static_cast<_Resource *>(__left) == *static_cast<_Resource *>(__right);
  }

  _LIBCUDACXX_TEMPLATE(class _Resource, _AllocType _Alloc_type)
    _LIBCUDACXX_REQUIRES((_Alloc_type == _AllocType::_Default)) //
  static constexpr _Alloc_vtable _Create() noexcept
  {
    return {&_Resource_vtable_builder::_Alloc<_Resource>,
            &_Resource_vtable_builder::_Dealloc<_Resource>,
            &_Resource_vtable_builder::_Equal<_Resource>};
  }

  _LIBCUDACXX_TEMPLATE(class _Resource, _AllocType _Alloc_type)
    _LIBCUDACXX_REQUIRES((_Alloc_type == _AllocType::_Async)) //
  static constexpr _Async_alloc_vtable _Create() noexcept
  {
    return {&_Resource_vtable_builder::_Alloc<_Resource>,
            &_Resource_vtable_builder::_Dealloc<_Resource>,
            &_Resource_vtable_builder::_Equal<_Resource>,
            &_Resource_vtable_builder::_Alloc_async<_Resource>,
            &_Resource_vtable_builder::_Dealloc_async<_Resource>};
  }
};
// clang-format on

template <class _Property>
struct _Property_vtable
{
  using _PropertyFn = __property_value_t<_Property> (*)(void*);
  _PropertyFn __property_fn = nullptr;

  constexpr _Property_vtable(_PropertyFn __property_fn_) noexcept
      : __property_fn(__property_fn_)
  {}
};

template <_AllocType _Alloc_type, class... _Properties> //
class basic_resource_ref;

template <class... _Properties>
struct _Resource_vtable : public _Property_vtable<_Properties>...
{
  template <class... _PropertyFns>
  constexpr _Resource_vtable(_PropertyFns... __property_fn_) noexcept
      : _Property_vtable<_Properties>(__property_fn_)...
  {}

  template <_AllocType _Alloc_type, class... _OtherProperties>
  constexpr _Resource_vtable(basic_resource_ref<_Alloc_type, _OtherProperties...>& __ref) noexcept
      : _Property_vtable<_Properties>(__ref._Property_vtable<_Properties>::__property_fn)...
  {}

  template <class _Resource>
  static constexpr _Resource_vtable _Create() noexcept
  {
    return {&_Resource_vtable_builder::_Get_property<_Resource, _Properties>...};
  }
};

template <class... _Properties>
struct _Filtered;

template <bool _IsUniqueProperty>
struct _Property_filter
{
  template <class _Property, class... _Properties>
  using _Filtered_properties =
    typename _Filtered<_Properties...>::_Filtered_vtable::template _Append_property<_Property>;
};
template <>
struct _Property_filter<false>
{
  template <class _Property, class... _Properties>
  using _Filtered_properties = typename _Filtered<_Properties...>::_Filtered_vtable;
};

template <class _Property, class... _Properties>
struct _Filtered<_Property, _Properties...>
{
  using _Filtered_vtable =
    typename _Property_filter<property_with_value<_Property> && !_CUDA_VSTD::_One_of<_Property, _Properties...>>::
      template _Filtered_properties<_Property, _Properties...>;

  template <class _OtherProperty>
  using _Append_property = _Filtered<_OtherProperty, _Property, _Properties...>;

  using _Vtable = _Resource_vtable<_Property, _Properties...>;
};

template <>
struct _Filtered<>
{
  using _Filtered_vtable = _Filtered<>;

  template <class _OtherProperty>
  using _Append_property = _Filtered<_OtherProperty>;

  using _Vtable = _Resource_vtable<>;
};

template <class... _Properties>
using _Filtered_vtable = typename _Filtered<_Properties...>::_Filtered_vtable::_Vtable;

template <class _Vtable>
struct _Alloc_base
{
  static_assert(_CUDA_VSTD::is_base_of_v<_Alloc_vtable, _Vtable>, "");

  _Alloc_base(void* __object_, const _Vtable* __static_vtable_) noexcept
      : __object(__object_)
      , __static_vtable(__static_vtable_)
  {}

  void* allocate(size_t __bytes, size_t __alignment = alignof(max_align_t))
  {
    return __static_vtable->__alloc_fn(__object, __bytes, __alignment);
  }

  void deallocate(void* _Ptr, size_t __bytes, size_t __alignment = alignof(max_align_t))
  {
    __static_vtable->__dealloc_fn(__object, _Ptr, __bytes, __alignment);
  }

protected:
  void* __object = nullptr;
  const _Vtable* __static_vtable = nullptr;
};

template <class _Vtable>
struct _Async_alloc_base : public _Alloc_base<_Vtable>
{
  static_assert(_CUDA_VSTD::is_base_of_v<_Async_alloc_vtable, _Vtable>, "");

  _Async_alloc_base(void* __object_, const _Vtable* __static_vtable_) noexcept
      : _Alloc_base<_Vtable>(__object_, __static_vtable_)
  {}

  void* allocate_async(size_t __bytes, size_t __alignment, cuda::stream_ref __stream)
  {
    return this->__static_vtable->__async_alloc_fn(this->__object, __bytes, __alignment, __stream);
  }

  void* allocate_async(size_t __bytes, cuda::stream_ref __stream)
  {
    return this->__static_vtable->__async_alloc_fn(this->__object, __bytes, alignof(max_align_t), __stream);
  }

  void deallocate_async(void* _Ptr, size_t __bytes, cuda::stream_ref __stream)
  {
    this->__static_vtable->__async_dealloc_fn(this->__object, _Ptr, __bytes, alignof(max_align_t), __stream);
  }

  void deallocate_async(void* _Ptr, size_t __bytes, size_t __alignment, cuda::stream_ref __stream)
  {
    this->__static_vtable->__async_dealloc_fn(this->__object, _Ptr, __bytes, __alignment, __stream);
  }
};

template <_AllocType _Alloc_type>
using _Resource_ref_base = _CUDA_VSTD::
  _If<_Alloc_type == _AllocType::_Default, _Alloc_base<_Alloc_vtable>, _Async_alloc_base<_Async_alloc_vtable>>;

template <_AllocType _Alloc_type>
using _Vtable_store = _CUDA_VSTD::_If<_Alloc_type == _AllocType::_Default, _Alloc_vtable, _Async_alloc_vtable>;

template <_AllocType _Alloc_type, class _Resource>
_LIBCUDACXX_INLINE_VAR constexpr _Vtable_store<_Alloc_type>
  __alloc_vtable = _Resource_vtable_builder::template _Create<_Resource, _Alloc_type>();

template <class>
_LIBCUDACXX_INLINE_VAR constexpr bool _Is_basic_resource_ref = false;

template <_AllocType _Alloc_type, class... _Properties> //
class basic_resource_ref
    : public _Resource_ref_base<_Alloc_type>
    , private _Filtered_vtable<_Properties...>
{
private:
  template <_AllocType, class...>
  friend class basic_resource_ref;

  template <class...>
  friend struct _Resource_vtable;

public:
  // clang-format off
  _LIBCUDACXX_TEMPLATE(class _Resource)
    _LIBCUDACXX_REQUIRES( (!_Is_basic_resource_ref<_Resource>
      && (((_Alloc_type == _AllocType::_Default) && resource_with<_Resource, _Properties...>) //
      ||  ((_Alloc_type == _AllocType::_Async) && async_resource_with<_Resource, _Properties...>)))) //
  basic_resource_ref(_Resource& __res) noexcept
      : _Resource_ref_base<_Alloc_type>(_CUDA_VSTD::addressof(__res), &__alloc_vtable<_Alloc_type, _Resource>)
      , _Filtered_vtable<_Properties...>(_Filtered_vtable<_Properties...>::template _Create<_Resource>())
  {}

  _LIBCUDACXX_TEMPLATE(class _Resource)
    _LIBCUDACXX_REQUIRES( (!_Is_basic_resource_ref<_Resource>
      && (((_Alloc_type == _AllocType::_Default) && resource_with<_Resource, _Properties...>) //
      ||  ((_Alloc_type == _AllocType::_Async) && async_resource_with<_Resource, _Properties...>)))) //
  basic_resource_ref(_Resource* __res) noexcept
      : _Resource_ref_base<_Alloc_type>(__res, &__alloc_vtable<_Alloc_type, _Resource>)
      , _Filtered_vtable<_Properties...>(_Filtered_vtable<_Properties...>::template _Create<_Resource>())
  {}

#if _LIBCUDACXX_STD_VER > 14
  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
    _LIBCUDACXX_REQUIRES( (_CUDA_VSTD::_One_of<_Properties, _OtherProperties...> && ...))
#else
  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
    _LIBCUDACXX_REQUIRES( _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<
      _CUDA_VSTD::_One_of<_Properties, _OtherProperties...>>...>)
#endif
  basic_resource_ref(
    basic_resource_ref<_Alloc_type, _OtherProperties...> __ref) noexcept
      : _Resource_ref_base<_Alloc_type>(__ref.__object, __ref.__static_vtable)
      , _Filtered_vtable<_Properties...>(__ref)
  {}

#if _LIBCUDACXX_STD_VER > 14
  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
    _LIBCUDACXX_REQUIRES( (_Alloc_type == _AllocType::_Default)
      && (_CUDA_VSTD::_One_of<_Properties, _OtherProperties...> && ...))
#else
  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
    _LIBCUDACXX_REQUIRES( (_Alloc_type == _AllocType::_Default)
      && _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<
        _CUDA_VSTD::_One_of<_Properties, _OtherProperties...>>...>)
#endif
  basic_resource_ref(
    basic_resource_ref<_AllocType::_Async, _OtherProperties...> __ref) noexcept
      : _Resource_ref_base<_Alloc_type>(__ref.__object, __ref.__static_vtable)
      , _Filtered_vtable<_Properties...>(__ref)
  {}

#if _LIBCUDACXX_STD_VER > 14
  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
    _LIBCUDACXX_REQUIRES((sizeof...(_Properties) == sizeof...(_OtherProperties))
      && (_CUDA_VSTD::_One_of<_Properties, _OtherProperties...> && ...))
#else
  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
    _LIBCUDACXX_REQUIRES( (sizeof...(_Properties) == sizeof...(_OtherProperties))
      && _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<
        _CUDA_VSTD::_One_of<_Properties, _OtherProperties...>>...>)
#endif
  bool operator==(
    const basic_resource_ref<_Alloc_type, _OtherProperties...> &__right) const {
    return (this->__static_vtable->__equal_fn == __right.__static_vtable->__equal_fn) //
        && this->__static_vtable->__equal_fn(this->__object, __right.__object);
  }

#if _LIBCUDACXX_STD_VER > 14
  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
    _LIBCUDACXX_REQUIRES( (sizeof...(_Properties) == sizeof...(_OtherProperties))
      && (_CUDA_VSTD::_One_of<_Properties, _OtherProperties...> && ...))
#else
  _LIBCUDACXX_TEMPLATE(class... _OtherProperties)
    _LIBCUDACXX_REQUIRES( (sizeof...(_Properties) == sizeof...(_OtherProperties))
      && _CUDA_VSTD::conjunction_v<_CUDA_VSTD::bool_constant<
        _CUDA_VSTD::_One_of<_Properties, _OtherProperties...>>...>)
#endif
  bool operator!=(
    const basic_resource_ref<_Alloc_type, _OtherProperties...> &__right) const {
    return !(*this == __right);
  }

  _LIBCUDACXX_TEMPLATE(class _Property)
    _LIBCUDACXX_REQUIRES( (!property_with_value<_Property>) _LIBCUDACXX_AND _CUDA_VSTD::_One_of<_Property, _Properties...>) //
  friend void get_property(const basic_resource_ref &, _Property) noexcept {}

  _LIBCUDACXX_TEMPLATE(class _Property)
    _LIBCUDACXX_REQUIRES( property_with_value<_Property> _LIBCUDACXX_AND _CUDA_VSTD::_One_of<_Property, _Properties...>) //
  friend __property_value_t<_Property> get_property(
    const basic_resource_ref &__res, _Property) noexcept {
    return __res._Property_vtable<_Property>::__property_fn(__res.__object);
  }
  // clang-format on
};

template <_AllocType _Alloc_type, class... _Properties>
_LIBCUDACXX_INLINE_VAR constexpr bool _Is_basic_resource_ref<basic_resource_ref<_Alloc_type, _Properties...>> = true;

template <class... _Properties> //
using resource_ref = basic_resource_ref<_AllocType::_Default, _Properties...>;

template <class... _Properties> //
using async_resource_ref = basic_resource_ref<_AllocType::_Async, _Properties...>;

/// \struct device_accessible
/// \brief The \c device_accessible property signals that the allocated memory is device accessible
struct device_accessible {};

/// \struct host_accessible
/// \brief The \c host_accessible property signals that the allocated memory is host accessible
struct host_accessible {};

} // namespace mr
_LIBCUDACXX_END_NAMESPACE_CUDA
#endif // _LIBCUDACXX_STD_VER > 11

#include <cuda/std/detail/__pragma_pop>

#endif // LIBCUDACXX_ENABLE_EXPERIMENTAL_MEMORY_RESOURCE

#endif // _CUDA_MEMORY_RESOURCE
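Putting the pieces together: a type models cuda::mr::resource when it provides allocate/deallocate and is equality comparable, advertises properties through hidden-friend get_property overloads, and can then be type-erased behind resource_ref, which stores only an object pointer plus pointers to the vtables built above. A minimal sketch follows; it assumes LIBCUDACXX_ENABLE_EXPERIMENTAL_MEMORY_RESOURCE is defined before inclusion (the header is a no-op otherwise), and simple_device_resource is a hypothetical type for illustration. Note that the concept emulation macros reduce to constexpr bools before C++20, so they can be checked with static_assert:

// memory_resource_example.cpp -- hypothetical sketch of a resource + resource_ref.
#define LIBCUDACXX_ENABLE_EXPERIMENTAL_MEMORY_RESOURCE
#include <cuda/memory_resource>

#include <cuda_runtime_api.h>
#include <cstddef>

struct simple_device_resource {
    void* allocate(std::size_t bytes, std::size_t /*alignment*/) {
        void* ptr = nullptr;
        cudaMalloc(&ptr, bytes); // error handling elided for brevity
        return ptr;
    }
    void deallocate(void* ptr, std::size_t, std::size_t) { cudaFree(ptr); }

    bool operator==(const simple_device_resource&) const { return true; }
    bool operator!=(const simple_device_resource& other) const { return !(*this == other); }

    // Hidden-friend hook found by the get_property CPO: a stateless property
    // has no value_type, so the overload returns void and just needs to exist.
    friend void get_property(const simple_device_resource&, cuda::mr::device_accessible) noexcept {}
};

int main() {
    static_assert(cuda::mr::resource_with<simple_device_resource, cuda::mr::device_accessible>, "");

    simple_device_resource res;
    cuda::mr::resource_ref<cuda::mr::device_accessible> ref{res}; // type-erased view
    void* ptr = ref.allocate(1024, 256);
    ref.deallocate(ptr, 1024, 256);
    return 0;
}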
miniCUDA124/include/cuda/pipeline
ADDED
|
@@ -0,0 +1,585 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* NVIDIA SOFTWARE LICENSE
|
| 5 |
+
*
|
| 6 |
+
* This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of the NVIDIA/CUDA C++ Library software and materials provided hereunder (“SOFTWARE”).
|
| 7 |
+
*
|
| 8 |
+
* This license can be accepted only by an adult of legal age of majority in the country in which the SOFTWARE is used. If you are under the legal age of majority, you must ask your parent or legal guardian to consent to this license. By taking delivery of the SOFTWARE, you affirm that you have reached the legal age of majority, you accept the terms of this license, and you take legal and financial responsibility for the actions of your permitted users.
|
| 9 |
+
*
|
| 10 |
+
* You agree to use the SOFTWARE only for purposes that are permitted by (a) this license, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions.
|
| 11 |
+
*
|
| 12 |
+
* 1. LICENSE. Subject to the terms of this license, NVIDIA grants you a non-exclusive limited license to: (a) install and use the SOFTWARE, and (b) distribute the SOFTWARE subject to the distribution requirements described in this license. NVIDIA reserves all rights, title and interest in and to the SOFTWARE not expressly granted to you under this license.
|
| 13 |
+
*
|
| 14 |
+
* 2. DISTRIBUTION REQUIREMENTS. These are the distribution requirements for you to exercise the distribution grant:
|
| 15 |
+
* a. The terms under which you distribute the SOFTWARE must be consistent with the terms of this license, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights.
|
| 16 |
+
* b. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SOFTWARE not in compliance with the requirements of this license, and to enforce the terms of your agreements with respect to distributed SOFTWARE.
|
| 17 |
+
*
|
| 18 |
+
* 3. LIMITATIONS. Your license to use the SOFTWARE is restricted as follows:
|
| 19 |
+
* a. The SOFTWARE is licensed for you to develop applications only for use in systems with NVIDIA GPUs.
|
| 20 |
+
* b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SOFTWARE or copies of the SOFTWARE.
|
| 21 |
+
* c. You may not modify or create derivative works of any portion of the SOFTWARE.
|
| 22 |
+
* d. You may not bypass, disable, or circumvent any technical measure, encryption, security, digital rights management or authentication mechanism in the SOFTWARE.
|
| 23 |
+
* e. You may not use the SOFTWARE in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SOFTWARE be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge.
|
| 24 |
+
* f. Unless you have an agreement with NVIDIA for this purpose, you may not use the SOFTWARE with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SOFTWARE for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses.
|
| 25 |
+
* g. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to use of the SOFTWARE outside of the scope of this Agreement, or not in compliance with its terms.
|
| 26 |
+
*
|
| 27 |
+
* 4. PRE-RELEASE. SOFTWARE versions identified as alpha, beta, preview, early access or otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. You may use a pre-release SOFTWARE version at your own risk, understanding that these versions are not intended for use in production or business-critical systems.
|
| 28 |
+
*
|
| 29 |
+
* 5. OWNERSHIP. The SOFTWARE and the related intellectual property rights therein are and will remain the sole and exclusive property of NVIDIA or its licensors. The SOFTWARE is copyrighted and protected by the laws of the United States and other countries, and international treaty provisions. NVIDIA may make changes to the SOFTWARE, at any time without notice, but is not obligated to support or update the SOFTWARE.
|
| 30 |
+
*
|
| 31 |
+
* 6. COMPONENTS UNDER OTHER LICENSES. The SOFTWARE may include NVIDIA or third-party components with separate legal notices or terms as may be described in proprietary notices accompanying the SOFTWARE. If and to the extent there is a conflict between the terms in this license and the license terms associated with a component, the license terms associated with the components control only to the extent necessary to resolve the conflict.
|
| 32 |
+
*
|
| 33 |
+
* 7. FEEDBACK. You may, but don’t have to, provide to NVIDIA any Feedback. “Feedback” means any suggestions, bug fixes, enhancements, modifications, feature requests or other feedback regarding the SOFTWARE. For any Feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) the Feedback without the payment of any royalties or fees to you. NVIDIA will use Feedback at its choice.
|
| 34 |
+
*
|
| 35 |
+
* 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE SOFTWARE WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ALL ERRORS WILL BE CORRECTED.
|
| 36 |
+
*
|
| 37 |
+
* 9. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR PERFORMANCE OF THE SOFTWARE, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT.
|
| 38 |
+
*
|
| 39 |
+
* 10. TERMINATION. Your rights under this license will terminate automatically without notice from NVIDIA if you fail to comply with any term and condition of this license or if you commence or participate in any legal proceeding against NVIDIA with respect to the SOFTWARE. NVIDIA may terminate this license with advance written notice to you if NVIDIA decides to no longer provide the SOFTWARE in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. Upon any termination of this license, you agree to promptly discontinue use of the SOFTWARE and destroy all copies in your possession or control. Your prior distributions in accordance with this license are not affected by the termination of this license. All provisions of this license will survive termination, except for the license granted to you.
|
| 40 |
+
*
|
| 41 |
+
* 11. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language. The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction.
|
| 42 |
+
*
|
| 43 |
+
* 12. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect.
|
| 44 |
+
*
|
| 45 |
+
* 13. EXPORT. The SOFTWARE is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SOFTWARE into any country, or use the SOFTWARE in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this license, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SOFTWARE.
|
| 46 |
+
*
|
| 47 |
+
* 14. GOVERNMENT USE. The SOFTWARE has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this license pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051.
|
| 48 |
+
*
|
| 49 |
+
* 15. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the parties relating to the subject matter of this license and supersedes all prior or contemporaneous understandings and agreements relating to this subject matter, whether oral or written. If any court of competent jurisdiction determines that any provision of this license is illegal, invalid or unenforceable, the remaining provisions will remain in full force and effect. This license may only be modified in a writing signed by an authorized representative of each party.
|
| 50 |
+
*
|
| 51 |
+
* (v. August 20, 2021)
|
| 52 |
+
*/
|
| 53 |
+
#ifndef _CUDA_PIPELINE
|
| 54 |
+
#define _CUDA_PIPELINE
|
| 55 |
+
|
| 56 |
+
#include "barrier"
|
| 57 |
+
#include "atomic"
|
| 58 |
+
#include "std/chrono"
|
| 59 |
+
|
| 60 |
+
_LIBCUDACXX_BEGIN_NAMESPACE_CUDA
|
| 61 |
+
|
| 62 |
+
// Forward declaration in barrier of pipeline
|
| 63 |
+
enum class pipeline_role {
|
| 64 |
+
producer,
|
| 65 |
+
consumer
|
| 66 |
+
};
|
| 67 |
+
|
| 68 |
+
template<thread_scope _Scope>
|
| 69 |
+
struct __pipeline_stage {
|
| 70 |
+
barrier<_Scope> __produced;
|
| 71 |
+
barrier<_Scope> __consumed;
|
| 72 |
+
};
|
| 73 |
+
|
| 74 |
+
template<thread_scope _Scope, uint8_t _Stages_count>
|
| 75 |
+
class pipeline_shared_state {
|
| 76 |
+
public:
|
| 77 |
+
pipeline_shared_state() = default;
|
| 78 |
+
pipeline_shared_state(const pipeline_shared_state &) = delete;
|
| 79 |
+
pipeline_shared_state(pipeline_shared_state &&) = delete;
|
| 80 |
+
pipeline_shared_state & operator=(pipeline_shared_state &&) = delete;
|
| 81 |
+
pipeline_shared_state & operator=(const pipeline_shared_state &) = delete;
|
| 82 |
+
|
| 83 |
+
private:
|
| 84 |
+
__pipeline_stage<_Scope> __stages[_Stages_count];
|
| 85 |
+
atomic<uint32_t, _Scope> __refcount;
|
| 86 |
+
|
| 87 |
+
template<thread_scope _Pipeline_scope>
|
| 88 |
+
friend class pipeline;
|
| 89 |
+
|
| 90 |
+
template<class _Group, thread_scope _Pipeline_scope, uint8_t _Pipeline_stages_count>
|
| 91 |
+
friend _LIBCUDACXX_INLINE_VISIBILITY
|
| 92 |
+
pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state);
|
| 93 |
+
|
| 94 |
+
template<class _Group, thread_scope _Pipeline_scope, uint8_t _Pipeline_stages_count>
|
| 95 |
+
friend _LIBCUDACXX_INLINE_VISIBILITY
|
| 96 |
+
pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, size_t __producer_count);
|
| 97 |
+
|
| 98 |
+
template<class _Group, thread_scope _Pipeline_scope, uint8_t _Pipeline_stages_count>
|
| 99 |
+
friend _LIBCUDACXX_INLINE_VISIBILITY
|
| 100 |
+
pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, pipeline_role __role);
|
| 101 |
+
};
|
| 102 |
+
|
| 103 |
+
struct __pipeline_asm_helper {
    _LIBCUDACXX_DEVICE
    static inline uint32_t __lane_id()
    {
        NV_IF_ELSE_TARGET(
            NV_IS_DEVICE,
            (
                uint32_t __lane_id;
                asm volatile ("mov.u32 %0, %%laneid;" : "=r"(__lane_id));
                return __lane_id;
            ),
            (
                return 0;
            )
        )
    }
};
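
// pipeline<_Scope> is a circular buffer of stages. __head indexes the stage
// the producers are filling and __tail the stage the consumers are draining;
// each wraps at __stages_count, flipping a parity bit so that waits on the
// phase-based barriers (wait_parity) distinguish successive passes over the
// same stage.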
template<thread_scope _Scope>
class pipeline {
public:
    pipeline(pipeline &&) = default;
    pipeline(const pipeline &) = delete;
    pipeline & operator=(pipeline &&) = delete;
    pipeline & operator=(const pipeline &) = delete;

    _LIBCUDACXX_INLINE_VISIBILITY
    ~pipeline()
    {
        if (__active) {
            (void)quit();
        }
    }
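
    // quit() drops this thread's reference to the shared state. On device,
    // threads of the same warp that target the same refcount are coalesced:
    // __match_any_sync groups them and one elected lane performs a single
    // fetch_sub for the whole group. Whoever brings the count to zero
    // destroys the stage barriers and reports true.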
    _LIBCUDACXX_INLINE_VISIBILITY
    bool quit()
    {
        bool __elected;
        uint32_t __sub_count;
        NV_IF_TARGET(NV_IS_DEVICE,
            const uint32_t __match_mask = __match_any_sync(__activemask(), reinterpret_cast<uintptr_t>(__shared_state_get_refcount()));
            const uint32_t __elected_id = __ffs(__match_mask) - 1;
            __elected = (__pipeline_asm_helper::__lane_id() == __elected_id);
            __sub_count = __popc(__match_mask);
        ,
            __elected = true;
            __sub_count = 1;
        )
        bool __released = false;
        if (__elected) {
            const uint32_t __old = __shared_state_get_refcount()->fetch_sub(__sub_count);
            const bool __last = (__old == __sub_count);
            if (__last) {
                for (uint8_t __stage = 0; __stage < __stages_count; ++__stage) {
                    __shared_state_get_stage(__stage)->__produced.~barrier();
                    __shared_state_get_stage(__stage)->__consumed.~barrier();
                }
                __released = true;
            }
        }
        __active = false;
        return __released;
    }
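
    // Producer side: acquire blocks until the consumers of the previous pass
    // have released stage __head (its __consumed barrier reaches the expected
    // parity); commit binds any async copies issued into this stage to the
    // stage's __produced barrier, arrives on it, and advances __head.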
    _LIBCUDACXX_INLINE_VISIBILITY
    void producer_acquire()
    {
        barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__head)->__consumed;
        __stage_barrier.wait_parity(__consumed_phase_parity);
    }

    _LIBCUDACXX_INLINE_VISIBILITY
    void producer_commit()
    {
        barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__head)->__produced;
        (void)__memcpy_completion_impl::__defer(__completion_mechanism::__async_group, __single_thread_group{}, 0, __stage_barrier);
        (void)__stage_barrier.arrive();
        if (++__head == __stages_count) {
            __head = 0;
            __consumed_phase_parity = !__consumed_phase_parity;
        }
    }
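
    // Consumer side: wait blocks until the producers have arrived on stage
    // __tail's __produced barrier; release arrives on the stage's __consumed
    // barrier, handing the stage back to the producers, and advances __tail.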
    _LIBCUDACXX_INLINE_VISIBILITY
    void consumer_wait()
    {
        barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__tail)->__produced;
        __stage_barrier.wait_parity(__produced_phase_parity);
    }

    _LIBCUDACXX_INLINE_VISIBILITY
    void consumer_release()
    {
        (void)__shared_state_get_stage(__tail)->__consumed.arrive();
        if (++__tail == __stages_count) {
            __tail = 0;
            __produced_phase_parity = !__produced_phase_parity;
        }
    }
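
    // Timed variants of consumer_wait: poll the __produced barrier's parity
    // with backoff until it flips or the deadline expires; the time_point
    // overload simply converts to a remaining duration.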
    template<class _Rep, class _Period>
    _LIBCUDACXX_INLINE_VISIBILITY
    bool consumer_wait_for(const _CUDA_VSTD::chrono::duration<_Rep, _Period> & __duration)
    {
        barrier<_Scope> & __stage_barrier = __shared_state_get_stage(__tail)->__produced;
        return _CUDA_VSTD::__libcpp_thread_poll_with_backoff(
            _CUDA_VSTD::__barrier_poll_tester_parity<barrier<_Scope>>(
                &__stage_barrier,
                __produced_phase_parity),
            _CUDA_VSTD::chrono::duration_cast<_CUDA_VSTD::chrono::nanoseconds>(__duration)
        );
    }

    template<class _Clock, class _Duration>
    _LIBCUDACXX_INLINE_VISIBILITY
    bool consumer_wait_until(const _CUDA_VSTD::chrono::time_point<_Clock, _Duration> & __time_point)
    {
        return consumer_wait_for(__time_point - _Clock::now());
    }

private:
    uint8_t __head : 8;
    uint8_t __tail : 8;
    const uint8_t __stages_count : 8;
    bool __consumed_phase_parity : 1;
    bool __produced_phase_parity : 1;
    bool __active : 1;
    // TODO: Remove partitioned on next ABI break
    const bool __partitioned : 1;
    char * const __shared_state;

    _LIBCUDACXX_INLINE_VISIBILITY
    pipeline(char * __shared_state, uint8_t __stages_count, bool __partitioned)
        : __head(0)
        , __tail(0)
        , __stages_count(__stages_count)
        , __consumed_phase_parity(true)
        , __produced_phase_parity(false)
        , __active(true)
        , __partitioned(__partitioned)
        , __shared_state(__shared_state)
    {}

    _LIBCUDACXX_INLINE_VISIBILITY
    __pipeline_stage<_Scope> * __shared_state_get_stage(uint8_t __stage)
    {
        ptrdiff_t __stage_offset = __stage * sizeof(__pipeline_stage<_Scope>);
        return reinterpret_cast<__pipeline_stage<_Scope>*>(__shared_state + __stage_offset);
    }

    _LIBCUDACXX_INLINE_VISIBILITY
    atomic<uint32_t, _Scope> * __shared_state_get_refcount()
    {
        ptrdiff_t __refcount_offset = __stages_count * sizeof(__pipeline_stage<_Scope>);
        return reinterpret_cast<atomic<uint32_t, _Scope>*>(__shared_state + __refcount_offset);
    }

    template<class _Group, thread_scope _Pipeline_scope, uint8_t _Pipeline_stages_count>
    friend _LIBCUDACXX_INLINE_VISIBILITY
    pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state);

    template<class _Group, thread_scope _Pipeline_scope, uint8_t _Pipeline_stages_count>
    friend _LIBCUDACXX_INLINE_VISIBILITY
    pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, size_t __producer_count);

    template<class _Group, thread_scope _Pipeline_scope, uint8_t _Pipeline_stages_count>
    friend _LIBCUDACXX_INLINE_VISIBILITY
    pipeline<_Pipeline_scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state, pipeline_role __role);
};
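
// make_pipeline, unified form: every thread of __group is both a producer
// and a consumer, so each stage's barriers expect one arrival per thread.
// Thread 0 initializes the shared state and all threads synchronize before
// the pipeline is handed out.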
template<class _Group, thread_scope _Scope, uint8_t _Stages_count>
_LIBCUDACXX_INLINE_VISIBILITY
pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state)
{
    const uint32_t __group_size = static_cast<uint32_t>(__group.size());
    const uint32_t __thread_rank = static_cast<uint32_t>(__group.thread_rank());

    if (__thread_rank == 0) {
        for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) {
            init(&__shared_state->__stages[__stage].__consumed, __group_size);
            init(&__shared_state->__stages[__stage].__produced, __group_size);
        }
        __shared_state->__refcount.store(__group_size, std::memory_order_relaxed);
    }
    __group.sync();

    return pipeline<_Scope>(reinterpret_cast<char*>(__shared_state->__stages), _Stages_count, false);
}
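
// make_pipeline, quantity-partitioned form: __producer_count threads of the
// group act as producers and the remaining threads as consumers, so the
// __produced barriers expect __producer_count arrivals and the __consumed
// barriers the complement.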
template<class _Group, thread_scope _Scope, uint8_t _Stages_count>
_LIBCUDACXX_INLINE_VISIBILITY
pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state, size_t __producer_count)
{
    const uint32_t __group_size = static_cast<uint32_t>(__group.size());
    const uint32_t __thread_rank = static_cast<uint32_t>(__group.thread_rank());

    if (__thread_rank == 0) {
        const size_t __consumer_count = __group_size - __producer_count;
        for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) {
            init(&__shared_state->__stages[__stage].__consumed, __consumer_count);
            init(&__shared_state->__stages[__stage].__produced, __producer_count);
        }
        __shared_state->__refcount.store(__group_size, std::memory_order_relaxed);
    }
    __group.sync();

    return pipeline<_Scope>(reinterpret_cast<char*>(__shared_state->__stages), _Stages_count, true);
}
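
// make_pipeline, role-partitioned form: the producer count is discovered at
// run time. The refcount is temporarily reused as a counter; producer
// threads add to it (warp-coalesced through __match_any_sync, as in quit()),
// then thread 0 reads the total to size the barriers and restores the
// refcount to the full group size.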
template<class _Group, thread_scope _Scope, uint8_t _Stages_count>
_LIBCUDACXX_INLINE_VISIBILITY
pipeline<_Scope> make_pipeline(const _Group & __group, pipeline_shared_state<_Scope, _Stages_count> * __shared_state, pipeline_role __role)
{
    const uint32_t __group_size = static_cast<uint32_t>(__group.size());
    const uint32_t __thread_rank = static_cast<uint32_t>(__group.thread_rank());

    if (__thread_rank == 0) {
        __shared_state->__refcount.store(0, std::memory_order_relaxed);
    }
    __group.sync();

    if (__role == pipeline_role::producer) {
        bool __elected;
        uint32_t __add_count;
        NV_IF_TARGET(NV_IS_DEVICE,
            const uint32_t __match_mask = __match_any_sync(__activemask(), reinterpret_cast<uintptr_t>(&__shared_state->__refcount));
            const uint32_t __elected_id = __ffs(__match_mask) - 1;
            __elected = (__pipeline_asm_helper::__lane_id() == __elected_id);
            __add_count = __popc(__match_mask);
        ,
            __elected = true;
            __add_count = 1;
        )
        if (__elected) {
            (void)__shared_state->__refcount.fetch_add(__add_count, std::memory_order_relaxed);
        }
    }
    __group.sync();

    if (__thread_rank == 0) {
        const uint32_t __producer_count = __shared_state->__refcount.load(std::memory_order_relaxed);
        const uint32_t __consumer_count = __group_size - __producer_count;
        for (uint8_t __stage = 0; __stage < _Stages_count; ++__stage) {
            init(&__shared_state->__stages[__stage].__consumed, __consumer_count);
            init(&__shared_state->__stages[__stage].__produced, __producer_count);
        }
        __shared_state->__refcount.store(__group_size, std::memory_order_relaxed);
    }
    __group.sync();

    return pipeline<_Scope>(reinterpret_cast<char*>(__shared_state->__stages), _Stages_count, true);
}

_LIBCUDACXX_END_NAMESPACE_CUDA

_LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE

template<uint8_t _Prior>
_LIBCUDACXX_DEVICE
void __pipeline_consumer_wait(pipeline<thread_scope_thread> & __pipeline);

_LIBCUDACXX_DEVICE
inline void __pipeline_consumer_wait(pipeline<thread_scope_thread> & __pipeline, uint8_t __prior);

_LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE

_LIBCUDACXX_BEGIN_NAMESPACE_CUDA
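
// Specialization for thread_scope_thread: a single thread pipelines its own
// cp.async copies, so no barriers or shared state are needed. On SM 8.0 and
// newer, producer_commit lowers to cp.async.commit_group and consumer_wait
// to cp.async.wait_group; __head and __tail simply count committed and
// retired commit-groups. On older architectures the copies complete
// synchronously and these calls compile to no-ops.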
template<>
class pipeline<thread_scope_thread> {
public:
    pipeline(pipeline &&) = default;
    pipeline(const pipeline &) = delete;
    pipeline & operator=(pipeline &&) = delete;
    pipeline & operator=(const pipeline &) = delete;

    _LIBCUDACXX_INLINE_VISIBILITY
    ~pipeline() {}

    _LIBCUDACXX_INLINE_VISIBILITY
    bool quit()
    {
        return true;
    }

    _LIBCUDACXX_INLINE_VISIBILITY
    void producer_acquire() {}

    _LIBCUDACXX_INLINE_VISIBILITY
    void producer_commit()
    {
        NV_IF_TARGET(NV_PROVIDES_SM_80,
            asm volatile ("cp.async.commit_group;");
            ++__head;
        )
    }

    _LIBCUDACXX_INLINE_VISIBILITY
    void consumer_wait()
    {
        NV_IF_TARGET(NV_PROVIDES_SM_80,
            if (__head == __tail) {
                return;
            }

            const uint8_t __prior = __head - __tail - 1;
            device::__pipeline_consumer_wait(*this, __prior);
            ++__tail;
        )
    }

    _LIBCUDACXX_INLINE_VISIBILITY
    void consumer_release() {}

    template<class _Rep, class _Period>
    _LIBCUDACXX_INLINE_VISIBILITY
    bool consumer_wait_for(const _CUDA_VSTD::chrono::duration<_Rep, _Period> & __duration)
    {
        (void)__duration;
        consumer_wait();
        return true;
    }

    template<class _Clock, class _Duration>
    _LIBCUDACXX_INLINE_VISIBILITY
    bool consumer_wait_until(const _CUDA_VSTD::chrono::time_point<_Clock, _Duration> & __time_point)
    {
        (void)__time_point;
        consumer_wait();
        return true;
    }

private:
    uint8_t __head;
    uint8_t __tail;

    _LIBCUDACXX_INLINE_VISIBILITY
    pipeline()
        : __head(0)
        , __tail(0)
    {}

    friend _LIBCUDACXX_INLINE_VISIBILITY inline pipeline<thread_scope_thread> make_pipeline();

    template<uint8_t _Prior>
    friend _LIBCUDACXX_INLINE_VISIBILITY
    void pipeline_consumer_wait_prior(pipeline<thread_scope_thread> & __pipeline);

    template<class _Group, thread_scope _Pipeline_scope, uint8_t _Pipeline_stages_count>
    friend _LIBCUDACXX_INLINE_VISIBILITY
    pipeline<_Pipeline_scope> __make_pipeline(const _Group & __group, pipeline_shared_state<_Pipeline_scope, _Pipeline_stages_count> * __shared_state);
};

_LIBCUDACXX_END_NAMESPACE_CUDA

_LIBCUDACXX_BEGIN_NAMESPACE_CUDA_DEVICE

template<uint8_t _Prior>
_LIBCUDACXX_DEVICE
void __pipeline_consumer_wait(pipeline<thread_scope_thread> & __pipeline)
{
    (void)__pipeline;
    NV_IF_TARGET(NV_PROVIDES_SM_80,
        constexpr uint8_t __max_prior = 8;

        asm volatile ("cp.async.wait_group %0;"
            :
            : "n"(_Prior < __max_prior ? _Prior : __max_prior));
    )
}
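
// Runtime dispatch onto the compile-time template above: the "n" constraint
// of cp.async.wait_group requires an immediate operand, so a runtime __prior
// is mapped to one of the nine instantiations via a switch.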
_LIBCUDACXX_DEVICE
inline void __pipeline_consumer_wait(pipeline<thread_scope_thread> & __pipeline, uint8_t __prior)
{
    switch (__prior) {
        case 0: device::__pipeline_consumer_wait<0>(__pipeline); break;
        case 1: device::__pipeline_consumer_wait<1>(__pipeline); break;
        case 2: device::__pipeline_consumer_wait<2>(__pipeline); break;
        case 3: device::__pipeline_consumer_wait<3>(__pipeline); break;
        case 4: device::__pipeline_consumer_wait<4>(__pipeline); break;
        case 5: device::__pipeline_consumer_wait<5>(__pipeline); break;
        case 6: device::__pipeline_consumer_wait<6>(__pipeline); break;
        case 7: device::__pipeline_consumer_wait<7>(__pipeline); break;
        default: device::__pipeline_consumer_wait<8>(__pipeline); break;
    }
}

_LIBCUDACXX_END_NAMESPACE_CUDA_DEVICE

_LIBCUDACXX_BEGIN_NAMESPACE_CUDA

_LIBCUDACXX_INLINE_VISIBILITY
inline pipeline<thread_scope_thread> make_pipeline()
{
    return pipeline<thread_scope_thread>();
}
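
// pipeline_consumer_wait_prior<N>: wait until all but the most recent N
// committed commit-groups have completed, then account for the retired
// stages by moving __tail up to __head - N.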
template<uint8_t _Prior>
_LIBCUDACXX_INLINE_VISIBILITY
void pipeline_consumer_wait_prior(pipeline<thread_scope_thread> & __pipeline)
{
    NV_IF_TARGET(NV_PROVIDES_SM_80,
        device::__pipeline_consumer_wait<_Prior>(__pipeline);
        __pipeline.__tail = __pipeline.__head - _Prior;
    )
}
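
// pipeline_producer_commit: ties completion of the asynchronous copies
// committed so far on a thread-scoped pipeline to __barrier, so that a
// later wait on the barrier also observes completion of those copies.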
template<thread_scope _Scope>
_LIBCUDACXX_INLINE_VISIBILITY
void pipeline_producer_commit(pipeline<thread_scope_thread> & __pipeline, barrier<_Scope> & __barrier)
{
    (void)__pipeline;
    NV_IF_TARGET(NV_PROVIDES_SM_80,(
        (void)__memcpy_completion_impl::__defer(__completion_mechanism::__async_group, __single_thread_group{}, 0, __barrier);
    ));
}
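
// Common implementation behind the pipeline memcpy_async overloads: pick the
// allowed completion mechanisms, erase the element type down to char while
// preserving its alignment, issue the copy, and defer completion tracking to
// the pipeline.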
template<typename _Group, class _Tp, typename _Size, thread_scope _Scope>
_LIBCUDACXX_INLINE_VISIBILITY
async_contract_fulfillment __memcpy_async_pipeline(_Group const & __group, _Tp * __destination, _Tp const * __source, _Size __size, pipeline<_Scope> & __pipeline) {
    // 1. Set the completion mechanisms that can be used.
    //
    // Do not (yet) allow async_bulk_group completion. Do not allow
    // mbarrier_complete_tx completion, even though it may be possible if
    // the pipeline has stage barriers in shared memory.
    _CUDA_VSTD::uint32_t __allowed_completions = _CUDA_VSTD::uint32_t(__completion_mechanism::__async_group);

    // Alignment: Use the maximum of the alignment of _Tp and that of a possible cuda::aligned_size_t.
    constexpr _CUDA_VSTD::size_t __size_align = __get_size_align<_Size>::align;
    constexpr _CUDA_VSTD::size_t __align = (alignof(_Tp) < __size_align) ? __size_align : alignof(_Tp);
    // Cast to char pointers. We don't need the type for alignment anymore and
    // erasing the types reduces the number of instantiations of down-stream
    // functions.
    char * __dest_char = reinterpret_cast<char*>(__destination);
    char const * __src_char = reinterpret_cast<char const *>(__source);

    // 2. Issue actual copy instructions.
    auto __cm = __dispatch_memcpy_async<__align>(__group, __dest_char, __src_char, __size, __allowed_completions);

    // 3. No need to synchronize with copy instructions.
    return __memcpy_completion_impl::__defer(__cm, __group, __size, __pipeline);
}
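
// Public memcpy_async overloads over pipelines: typed and void pointers,
// plain and aligned_size_t sizes, group and single-thread forms all funnel
// into __memcpy_async_pipeline above.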
template<typename _Group, class _Type, thread_scope _Scope>
_LIBCUDACXX_INLINE_VISIBILITY
async_contract_fulfillment memcpy_async(_Group const & __group, _Type * __destination, _Type const * __source, std::size_t __size, pipeline<_Scope> & __pipeline) {
    return __memcpy_async_pipeline(__group, __destination, __source, __size, __pipeline);
}

template<typename _Group, class _Type, std::size_t _Alignment, thread_scope _Scope, std::size_t _Larger_alignment = (alignof(_Type) > _Alignment) ? alignof(_Type) : _Alignment>
_LIBCUDACXX_INLINE_VISIBILITY
async_contract_fulfillment memcpy_async(_Group const & __group, _Type * __destination, _Type const * __source, aligned_size_t<_Alignment> __size, pipeline<_Scope> & __pipeline) {
    return __memcpy_async_pipeline(__group, __destination, __source, __size, __pipeline);
}

template<class _Type, typename _Size, thread_scope _Scope>
_LIBCUDACXX_INLINE_VISIBILITY
async_contract_fulfillment memcpy_async(_Type * __destination, _Type const * __source, _Size __size, pipeline<_Scope> & __pipeline) {
    return __memcpy_async_pipeline(__single_thread_group{}, __destination, __source, __size, __pipeline);
}

template<typename _Group, thread_scope _Scope>
_LIBCUDACXX_INLINE_VISIBILITY
async_contract_fulfillment memcpy_async(_Group const & __group, void * __destination, void const * __source, std::size_t __size, pipeline<_Scope> & __pipeline) {
    return __memcpy_async_pipeline(__group, reinterpret_cast<char *>(__destination), reinterpret_cast<char const *>(__source), __size, __pipeline);
}

template<typename _Group, std::size_t _Alignment, thread_scope _Scope>
_LIBCUDACXX_INLINE_VISIBILITY
async_contract_fulfillment memcpy_async(_Group const & __group, void * __destination, void const * __source, aligned_size_t<_Alignment> __size, pipeline<_Scope> & __pipeline) {
    return __memcpy_async_pipeline(__group, reinterpret_cast<char*>(__destination), reinterpret_cast<char const *>(__source), __size, __pipeline);
}

template<typename _Size, thread_scope _Scope>
_LIBCUDACXX_INLINE_VISIBILITY
async_contract_fulfillment memcpy_async(void * __destination, void const * __source, _Size __size, pipeline<_Scope> & __pipeline) {
    return __memcpy_async_pipeline(__single_thread_group{}, reinterpret_cast<char*>(__destination), reinterpret_cast<char const *>(__source), __size, __pipeline);
}

_LIBCUDACXX_END_NAMESPACE_CUDA

#endif //_CUDA_PIPELINE