ZTWHHH committed on
Commit 533dd04 · verified · 1 Parent(s): d55d1f9

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.

Files changed (50)
  1. .gitattributes +2 -0
  2. openflamingo/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so +3 -0
  3. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h +595 -0
  4. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h +348 -0
  5. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h +0 -0
  6. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h +96 -0
  7. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h +78 -0
  8. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h +994 -0
  9. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h +282 -0
  10. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h +90 -0
  11. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier.h +211 -0
  12. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h +642 -0
  13. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h +1929 -0
  14. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h +224 -0
  15. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h +0 -0
  16. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h +198 -0
  17. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h +57 -0
  18. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h +118 -0
  19. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_types.h +0 -0
  20. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_config.h +65 -0
  21. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/library_types.h +105 -0
  22. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_functions.h +65 -0
  23. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/mma.h +60 -0
  24. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h +215 -0
  25. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp +604 -0
  26. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp +134 -0
  27. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.h +510 -0
  28. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp +588 -0
  29. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_atomic_functions.h +58 -0
  30. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h +539 -0
  31. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp +527 -0
  32. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_functions.h +439 -0
  33. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_indirect_functions.h +286 -0
  34. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h +229 -0
  35. openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h +175 -0
  36. openflamingo/lib/python3.10/site-packages/tokenizers.libs/libk5crypto-b1f99d5c.so.3.1 +3 -0
  37. phi4/lib/python3.10/site-packages/PIL/__pycache__/FontFile.cpython-310.pyc +0 -0
  38. phi4/lib/python3.10/site-packages/PIL/__pycache__/GimpGradientFile.cpython-310.pyc +0 -0
  39. phi4/lib/python3.10/site-packages/PIL/__pycache__/IcnsImagePlugin.cpython-310.pyc +0 -0
  40. phi4/lib/python3.10/site-packages/PIL/__pycache__/ImageMath.cpython-310.pyc +0 -0
  41. phi4/lib/python3.10/site-packages/PIL/__pycache__/McIdasImagePlugin.cpython-310.pyc +0 -0
  42. phi4/lib/python3.10/site-packages/PIL/__pycache__/MspImagePlugin.cpython-310.pyc +0 -0
  43. phi4/lib/python3.10/site-packages/PIL/__pycache__/PaletteFile.cpython-310.pyc +0 -0
  44. phi4/lib/python3.10/site-packages/PIL/__pycache__/PngImagePlugin.cpython-310.pyc +0 -0
  45. phi4/lib/python3.10/site-packages/PIL/__pycache__/QoiImagePlugin.cpython-310.pyc +0 -0
  46. phi4/lib/python3.10/site-packages/PIL/__pycache__/report.cpython-310.pyc +0 -0
  47. phi4/lib/python3.10/site-packages/cffi-1.17.1.dist-info/INSTALLER +1 -0
  48. phi4/lib/python3.10/site-packages/cffi-1.17.1.dist-info/LICENSE +26 -0
  49. phi4/lib/python3.10/site-packages/cffi-1.17.1.dist-info/METADATA +40 -0
  50. phi4/lib/python3.10/site-packages/cffi-1.17.1.dist-info/RECORD +49 -0
.gitattributes CHANGED
@@ -751,3 +751,5 @@ phi4/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.p
 phi4/lib/python3.10/site-packages/torchvision.libs/libwebp.54a0d02a.so.7 filter=lfs diff=lfs merge=lfs -text
 phi4/lib/python3.10/site-packages/torchvision.libs/libjpeg.1c1c4b09.so.8 filter=lfs diff=lfs merge=lfs -text
 openflamingo/lib/python3.10/site-packages/pip/_vendor/distlib/w64-arm.exe filter=lfs diff=lfs merge=lfs -text
+openflamingo/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
+openflamingo/lib/python3.10/site-packages/tokenizers.libs/libk5crypto-b1f99d5c.so.3.1 filter=lfs diff=lfs merge=lfs -text
openflamingo/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:586f912f8b0986df1968fc606531db587031c336df5f284847e96b91069c904c
+size 4498264
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/channel_descriptor.h ADDED
@@ -0,0 +1,595 @@
+/*
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(__CHANNEL_DESCRIPTOR_H__)
+#define __CHANNEL_DESCRIPTOR_H__
+
+#if defined(__cplusplus)
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+#include "cuda_runtime_api.h"
+
+/*******************************************************************************
+*                                                                              *
+*                                                                              *
+*                                                                              *
+*******************************************************************************/
+
+/**
+ * \addtogroup CUDART_HIGHLEVEL
+ *
+ * @{
+ */
+
+/**
+ * \brief \hl Returns a channel descriptor using the specified format
+ *
+ * Returns a channel descriptor with format \p f and number of bits of each
+ * component \p x, \p y, \p z, and \p w. The ::cudaChannelFormatDesc is
+ * defined as:
+ * \code
+  struct cudaChannelFormatDesc {
+    int x, y, z, w;
+    enum cudaChannelFormatKind f;
+  };
+ * \endcode
+ *
+ * where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned,
+ * ::cudaChannelFormatKindUnsigned, cudaChannelFormatKindFloat,
+ * ::cudaChannelFormatKindSignedNormalized8X1, ::cudaChannelFormatKindSignedNormalized8X2,
+ * ::cudaChannelFormatKindSignedNormalized8X4,
+ * ::cudaChannelFormatKindUnsignedNormalized8X1, ::cudaChannelFormatKindUnsignedNormalized8X2,
+ * ::cudaChannelFormatKindUnsignedNormalized8X4,
+ * ::cudaChannelFormatKindSignedNormalized16X1, ::cudaChannelFormatKindSignedNormalized16X2,
+ * ::cudaChannelFormatKindSignedNormalized16X4,
+ * ::cudaChannelFormatKindUnsignedNormalized16X1, ::cudaChannelFormatKindUnsignedNormalized16X2,
+ * ::cudaChannelFormatKindUnsignedNormalized16X4
+ * or ::cudaChannelFormatKindNV12.
+ *
+ * The format is specified by the template specialization.
+ *
+ * The template function specializes for the following scalar types:
+ * char, signed char, unsigned char, short, unsigned short, int, unsigned int, long, unsigned long, and float.
+ * The template function specializes for the following vector types:
+ * char{1|2|4}, uchar{1|2|4}, short{1|2|4}, ushort{1|2|4}, int{1|2|4}, uint{1|2|4}, long{1|2|4}, ulong{1|2|4}, float{1|2|4}.
+ * The template function specializes for following cudaChannelFormatKind enum values:
+ * ::cudaChannelFormatKind{Uns|S}ignedNormalized{8|16}X{1|2|4}, and ::cudaChannelFormatKindNV12.
+ *
+ * Invoking the function on a type without a specialization defaults to creating a channel format of kind ::cudaChannelFormatKindNone
+ *
+ * \return
+ * Channel descriptor with format \p f
+ *
+ * \sa \ref ::cudaCreateChannelDesc(int,int,int,int,cudaChannelFormatKind) "cudaCreateChannelDesc (Low level)",
+ * ::cudaGetChannelDesc, ::cudaGetTextureReference,
+ * \ref ::cudaBindTexture(size_t*, const struct texture< T, dim, readMode>&, const void*, const struct cudaChannelFormatDesc&, size_t) "cudaBindTexture (High level)",
+ * \ref ::cudaBindTexture(size_t*, const struct texture< T, dim, readMode>&, const void*, size_t) "cudaBindTexture (High level, inherited channel descriptor)",
+ * \ref ::cudaBindTexture2D(size_t*, const struct texture< T, dim, readMode>&, const void*, const struct cudaChannelFormatDesc&, size_t, size_t, size_t) "cudaBindTexture2D (High level)",
+ * \ref ::cudaBindTextureToArray(const struct texture< T, dim, readMode>&, cudaArray_const_t, const struct cudaChannelFormatDesc&) "cudaBindTextureToArray (High level)",
+ * \ref ::cudaBindTextureToArray(const struct texture< T, dim, readMode>&, cudaArray_const_t) "cudaBindTextureToArray (High level, inherited channel descriptor)",
+ * \ref ::cudaUnbindTexture(const struct texture< T, dim, readMode>&) "cudaUnbindTexture (High level)",
+ * \ref ::cudaGetTextureAlignmentOffset(size_t*, const struct texture< T, dim, readMode>&) "cudaGetTextureAlignmentOffset (High level)"
+ */
+template<class T> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void)
+{
+  return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone);
+}
+
+static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf(void)
+{
+  int e = (int)sizeof(unsigned short) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
+}
+
+static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf1(void)
+{
+  int e = (int)sizeof(unsigned short) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
+}
+
+static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf2(void)
+{
+  int e = (int)sizeof(unsigned short) * 8;
+
+  return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat);
+}
+
+static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescHalf4(void)
+{
+  int e = (int)sizeof(unsigned short) * 8;
+
+  return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char>(void)
+{
+  int e = (int)sizeof(char) * 8;
+
+#if defined(_CHAR_UNSIGNED) || defined(__CHAR_UNSIGNED__)
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+#else /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+#endif /* _CHAR_UNSIGNED || __CHAR_UNSIGNED__ */
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<signed char>(void)
+{
+  int e = (int)sizeof(signed char) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned char>(void)
+{
+  int e = (int)sizeof(unsigned char) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char1>(void)
+{
+  int e = (int)sizeof(signed char) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar1>(void)
+{
+  int e = (int)sizeof(unsigned char) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char2>(void)
+{
+  int e = (int)sizeof(signed char) * 8;
+
+  return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar2>(void)
+{
+  int e = (int)sizeof(unsigned char) * 8;
+
+  return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<char4>(void)
+{
+  int e = (int)sizeof(signed char) * 8;
+
+  return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uchar4>(void)
+{
+  int e = (int)sizeof(unsigned char) * 8;
+
+  return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short>(void)
+{
+  int e = (int)sizeof(short) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned short>(void)
+{
+  int e = (int)sizeof(unsigned short) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short1>(void)
+{
+  int e = (int)sizeof(short) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort1>(void)
+{
+  int e = (int)sizeof(unsigned short) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short2>(void)
+{
+  int e = (int)sizeof(short) * 8;
+
+  return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort2>(void)
+{
+  int e = (int)sizeof(unsigned short) * 8;
+
+  return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<short4>(void)
+{
+  int e = (int)sizeof(short) * 8;
+
+  return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ushort4>(void)
+{
+  int e = (int)sizeof(unsigned short) * 8;
+
+  return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int>(void)
+{
+  int e = (int)sizeof(int) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned int>(void)
+{
+  int e = (int)sizeof(unsigned int) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int1>(void)
+{
+  int e = (int)sizeof(int) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint1>(void)
+{
+  int e = (int)sizeof(unsigned int) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int2>(void)
+{
+  int e = (int)sizeof(int) * 8;
+
+  return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint2>(void)
+{
+  int e = (int)sizeof(unsigned int) * 8;
+
+  return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<int4>(void)
+{
+  int e = (int)sizeof(int) * 8;
+
+  return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<uint4>(void)
+{
+  int e = (int)sizeof(unsigned int) * 8;
+
+  return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
+}
+
+#if !defined(__LP64__)
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long>(void)
+{
+  int e = (int)sizeof(long) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<unsigned long>(void)
+{
+  int e = (int)sizeof(unsigned long) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long1>(void)
+{
+  int e = (int)sizeof(long) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong1>(void)
+{
+  int e = (int)sizeof(unsigned long) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long2>(void)
+{
+  int e = (int)sizeof(long) * 8;
+
+  return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong2>(void)
+{
+  int e = (int)sizeof(unsigned long) * 8;
+
+  return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindUnsigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<long4>(void)
+{
+  int e = (int)sizeof(long) * 8;
+
+  return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindSigned);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<ulong4>(void)
+{
+  int e = (int)sizeof(unsigned long) * 8;
+
+  return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindUnsigned);
+}
+
+#endif /* !__LP64__ */
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float>(void)
+{
+  int e = (int)sizeof(float) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float1>(void)
+{
+  int e = (int)sizeof(float) * 8;
+
+  return cudaCreateChannelDesc(e, 0, 0, 0, cudaChannelFormatKindFloat);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float2>(void)
+{
+  int e = (int)sizeof(float) * 8;
+
+  return cudaCreateChannelDesc(e, e, 0, 0, cudaChannelFormatKindFloat);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<float4>(void)
+{
+  int e = (int)sizeof(float) * 8;
+
+  return cudaCreateChannelDesc(e, e, e, e, cudaChannelFormatKindFloat);
+}
+
+static __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDescNV12(void)
+{
+  int e = (int)sizeof(char) * 8;
+
+  return cudaCreateChannelDesc(e, e, e, 0, cudaChannelFormatKindNV12);
+}
+
+template<cudaChannelFormatKind> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc(void)
+{
+    return cudaCreateChannelDesc(0, 0, 0, 0, cudaChannelFormatKindNone);
+}
+
+/* Signed 8-bit normalized integer formats */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X1>(void)
+{
+    return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedNormalized8X1);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X2>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedNormalized8X2);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized8X4>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindSignedNormalized8X4);
+}
+
+/* Unsigned 8-bit normalized integer formats */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X1>(void)
+{
+    return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsignedNormalized8X1);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X2>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindUnsignedNormalized8X2);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized8X4>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedNormalized8X4);
+}
+
+/* Signed 16-bit normalized integer formats */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X1>(void)
+{
+    return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindSignedNormalized16X1);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X2>(void)
+{
+    return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindSignedNormalized16X2);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedNormalized16X4>(void)
+{
+    return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindSignedNormalized16X4);
+}
+
+/* Unsigned 16-bit normalized integer formats */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X1>(void)
+{
+    return cudaCreateChannelDesc(16, 0, 0, 0, cudaChannelFormatKindUnsignedNormalized16X1);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X2>(void)
+{
+    return cudaCreateChannelDesc(16, 16, 0, 0, cudaChannelFormatKindUnsignedNormalized16X2);
+}
+
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedNormalized16X4>(void)
+{
+    return cudaCreateChannelDesc(16, 16, 16, 16, cudaChannelFormatKindUnsignedNormalized16X4);
+}
+
+/* NV12 format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindNV12>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 8, 0, cudaChannelFormatKindNV12);
+}
+
+/* BC1 format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed1>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1);
+}
+
+/* BC1sRGB format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed1SRGB>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed1SRGB);
+}
+
+/* BC2 format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed2>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2);
+}
+
+/* BC2sRGB format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed2SRGB>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed2SRGB);
+}
+
+/* BC3 format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed3>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3);
+}
+
+/* BC3sRGB format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed3SRGB>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed3SRGB);
+}
+
+/* BC4 unsigned format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed4>(void)
+{
+    return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsignedBlockCompressed4);
+}
+
+/* BC4 signed format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed4>(void)
+{
+    return cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSignedBlockCompressed4);
+}
+
+/* BC5 unsigned format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed5>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindUnsignedBlockCompressed5);
+}
+
+/* BC5 signed format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed5>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 0, 0, cudaChannelFormatKindSignedBlockCompressed5);
+}
+
+/* BC6H unsigned format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed6H>(void)
+{
+    return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindUnsignedBlockCompressed6H);
+}
+
+/* BC6H signed format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindSignedBlockCompressed6H>(void)
+{
+    return cudaCreateChannelDesc(16, 16, 16, 0, cudaChannelFormatKindSignedBlockCompressed6H);
+}
+
+/* BC7 format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed7>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7);
+}
+
+/* BC7sRGB format */
+template<> __inline__ __host__ cudaChannelFormatDesc cudaCreateChannelDesc<cudaChannelFormatKindUnsignedBlockCompressed7SRGB>(void)
+{
+    return cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsignedBlockCompressed7SRGB);
+}
+
+#endif /* __cplusplus */
+
+/** @} */
+/** @} */ /* END CUDART_TEXTURE_HL */
+
+#endif /* !__CHANNEL_DESCRIPTOR_H__ */
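For orientation, the specializations above are what make cudaCreateChannelDesc<T>() resolve to the right bit layout when a typed CUDA array is allocated. A minimal host-side sketch of the intended usage follows; it is not part of the committed file, and assumes only a standard CUDA toolkit (builds with nvcc).

// Minimal usage sketch, not part of this commit. Build: nvcc sketch.cu
#include <cuda_runtime.h>   // pulls in channel_descriptor.h

int main(void)
{
    // The float4 specialization above yields x = y = z = w = 32 bits,
    // f = cudaChannelFormatKindFloat.
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();

    // Equivalent low-level call spelled out explicitly:
    //   cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);

    cudaArray_t arr = NULL;
    cudaMallocArray(&arr, &desc, 1024, 1024);   // 1024x1024 array of float4
    cudaFreeArray(arr);
    return 0;
}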
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h ADDED
@@ -0,0 +1,348 @@
+/*
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#if !defined(CU_COMPLEX_H_)
+#define CU_COMPLEX_H_
+
+#if !defined(__CUDACC_RTC__)
+#if defined(__GNUC__)
+#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)))
+#pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+#endif
+#endif
+
+/* When trying to include C header file in C++ Code extern "C" is required
+ * But the Standard QNX headers already have ifdef extern in them when compiling C++ Code
+ * extern "C" cannot be nested
+ * Hence keep the header out of extern "C" block
+ */
+
+#if !defined(__CUDACC__)
+#include <math.h> /* import fabsf, sqrt */
+#endif /* !defined(__CUDACC__) */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif /* __cplusplus */
+
+#include "vector_types.h"
+
+typedef float2 cuFloatComplex;
+
+__host__ __device__ static __inline__ float cuCrealf (cuFloatComplex x)
+{
+    return x.x;
+}
+
+__host__ __device__ static __inline__ float cuCimagf (cuFloatComplex x)
+{
+    return x.y;
+}
+
+__host__ __device__ static __inline__ cuFloatComplex make_cuFloatComplex
+                                                             (float r, float i)
+{
+    cuFloatComplex res;
+    res.x = r;
+    res.y = i;
+    return res;
+}
+
+__host__ __device__ static __inline__ cuFloatComplex cuConjf (cuFloatComplex x)
+{
+    return make_cuFloatComplex (cuCrealf(x), -cuCimagf(x));
+}
+__host__ __device__ static __inline__ cuFloatComplex cuCaddf (cuFloatComplex x,
+                                                              cuFloatComplex y)
+{
+    return make_cuFloatComplex (cuCrealf(x) + cuCrealf(y),
+                                cuCimagf(x) + cuCimagf(y));
+}
+
+__host__ __device__ static __inline__ cuFloatComplex cuCsubf (cuFloatComplex x,
+                                                              cuFloatComplex y)
+{
+    return make_cuFloatComplex (cuCrealf(x) - cuCrealf(y),
+                                cuCimagf(x) - cuCimagf(y));
+}
+
+/* This implementation could suffer from intermediate overflow even though
+ * the final result would be in range. However, various implementations do
+ * not guard against this (presumably to avoid losing performance), so we
+ * don't do it either to stay competitive.
+ */
+__host__ __device__ static __inline__ cuFloatComplex cuCmulf (cuFloatComplex x,
+                                                              cuFloatComplex y)
+{
+    cuFloatComplex prod;
+    prod = make_cuFloatComplex ((cuCrealf(x) * cuCrealf(y)) -
+                                (cuCimagf(x) * cuCimagf(y)),
+                                (cuCrealf(x) * cuCimagf(y)) +
+                                (cuCimagf(x) * cuCrealf(y)));
+    return prod;
+}
+
+/* This implementation guards against intermediate underflow and overflow
+ * by scaling. Such guarded implementations are usually the default for
+ * complex library implementations, with some also offering an unguarded,
+ * faster version.
+ */
+__host__ __device__ static __inline__ cuFloatComplex cuCdivf (cuFloatComplex x,
+                                                              cuFloatComplex y)
+{
+    cuFloatComplex quot;
+    float s = fabsf(cuCrealf(y)) + fabsf(cuCimagf(y));
+    float oos = 1.0f / s;
+    float ars = cuCrealf(x) * oos;
+    float ais = cuCimagf(x) * oos;
+    float brs = cuCrealf(y) * oos;
+    float bis = cuCimagf(y) * oos;
+    s = (brs * brs) + (bis * bis);
+    oos = 1.0f / s;
+    quot = make_cuFloatComplex (((ars * brs) + (ais * bis)) * oos,
+                                ((ais * brs) - (ars * bis)) * oos);
+    return quot;
+}
+
+/*
+ * We would like to call hypotf(), but it's not available on all platforms.
+ * This discrete implementation guards against intermediate underflow and
+ * overflow by scaling. Otherwise we would lose half the exponent range.
+ * There are various ways of doing guarded computation. For now chose the
+ * simplest and fastest solution, however this may suffer from inaccuracies
+ * if sqrt and division are not IEEE compliant.
+ */
+__host__ __device__ static __inline__ float cuCabsf (cuFloatComplex x)
+{
+    float a = cuCrealf(x);
+    float b = cuCimagf(x);
+    float v, w, t;
+    a = fabsf(a);
+    b = fabsf(b);
+    if (a > b) {
+        v = a;
+        w = b;
+    } else {
+        v = b;
+        w = a;
+    }
+    t = w / v;
+    t = 1.0f + t * t;
+    t = v * sqrtf(t);
+    if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) {
+        t = v + w;
+    }
+    return t;
+}
+
+/* Double precision */
+typedef double2 cuDoubleComplex;
+
+__host__ __device__ static __inline__ double cuCreal (cuDoubleComplex x)
+{
+    return x.x;
+}
+
+__host__ __device__ static __inline__ double cuCimag (cuDoubleComplex x)
+{
+    return x.y;
+}
+
+__host__ __device__ static __inline__ cuDoubleComplex make_cuDoubleComplex
+                                                           (double r, double i)
+{
+    cuDoubleComplex res;
+    res.x = r;
+    res.y = i;
+    return res;
+}
+
+__host__ __device__ static __inline__ cuDoubleComplex cuConj(cuDoubleComplex x)
+{
+    return make_cuDoubleComplex (cuCreal(x), -cuCimag(x));
+}
+
+__host__ __device__ static __inline__ cuDoubleComplex cuCadd(cuDoubleComplex x,
+                                                             cuDoubleComplex y)
+{
+    return make_cuDoubleComplex (cuCreal(x) + cuCreal(y),
+                                 cuCimag(x) + cuCimag(y));
+}
+
+__host__ __device__ static __inline__ cuDoubleComplex cuCsub(cuDoubleComplex x,
+                                                             cuDoubleComplex y)
+{
+    return make_cuDoubleComplex (cuCreal(x) - cuCreal(y),
+                                 cuCimag(x) - cuCimag(y));
+}
+
+/* This implementation could suffer from intermediate overflow even though
+ * the final result would be in range. However, various implementations do
+ * not guard against this (presumably to avoid losing performance), so we
+ * don't do it either to stay competitive.
+ */
+__host__ __device__ static __inline__ cuDoubleComplex cuCmul(cuDoubleComplex x,
+                                                             cuDoubleComplex y)
+{
+    cuDoubleComplex prod;
+    prod = make_cuDoubleComplex ((cuCreal(x) * cuCreal(y)) -
+                                 (cuCimag(x) * cuCimag(y)),
+                                 (cuCreal(x) * cuCimag(y)) +
+                                 (cuCimag(x) * cuCreal(y)));
+    return prod;
+}
+
+/* This implementation guards against intermediate underflow and overflow
+ * by scaling. Such guarded implementations are usually the default for
+ * complex library implementations, with some also offering an unguarded,
+ * faster version.
+ */
+__host__ __device__ static __inline__ cuDoubleComplex cuCdiv(cuDoubleComplex x,
+                                                             cuDoubleComplex y)
+{
+    cuDoubleComplex quot;
+    double s = (fabs(cuCreal(y))) + (fabs(cuCimag(y)));
+    double oos = 1.0 / s;
+    double ars = cuCreal(x) * oos;
+    double ais = cuCimag(x) * oos;
+    double brs = cuCreal(y) * oos;
+    double bis = cuCimag(y) * oos;
+    s = (brs * brs) + (bis * bis);
+    oos = 1.0 / s;
+    quot = make_cuDoubleComplex (((ars * brs) + (ais * bis)) * oos,
+                                 ((ais * brs) - (ars * bis)) * oos);
+    return quot;
+}
+
+/* This implementation guards against intermediate underflow and overflow
+ * by scaling. Otherwise we would lose half the exponent range. There are
+ * various ways of doing guarded computation. For now chose the simplest
+ * and fastest solution, however this may suffer from inaccuracies if sqrt
+ * and division are not IEEE compliant.
+ */
+__host__ __device__ static __inline__ double cuCabs (cuDoubleComplex x)
+{
+    double a = cuCreal(x);
+    double b = cuCimag(x);
+    double v, w, t;
+    a = fabs(a);
+    b = fabs(b);
+    if (a > b) {
+        v = a;
+        w = b;
+    } else {
+        v = b;
+        w = a;
+    }
+    t = w / v;
+    t = 1.0 + t * t;
+    t = v * sqrt(t);
+    if ((v == 0.0) ||
+        (v > 1.79769313486231570e+308) || (w > 1.79769313486231570e+308)) {
+        t = v + w;
+    }
+    return t;
+}
+
+#if defined(__cplusplus)
+}
+#endif /* __cplusplus */
+
+/* aliases */
+typedef cuFloatComplex cuComplex;
+__host__ __device__ static __inline__ cuComplex make_cuComplex (float x,
+                                                                float y)
+{
+    return make_cuFloatComplex (x, y);
+}
+
+/* float-to-double promotion */
+__host__ __device__ static __inline__ cuDoubleComplex cuComplexFloatToDouble
+                                                      (cuFloatComplex c)
+{
+    return make_cuDoubleComplex ((double)cuCrealf(c), (double)cuCimagf(c));
+}
+
+__host__ __device__ static __inline__ cuFloatComplex cuComplexDoubleToFloat
+                                                     (cuDoubleComplex c)
+{
+    return make_cuFloatComplex ((float)cuCreal(c), (float)cuCimag(c));
+}
+
+
+__host__ __device__ static __inline__ cuComplex cuCfmaf( cuComplex x, cuComplex y, cuComplex d)
+{
+    float real_res;
+    float imag_res;
+
+    real_res = (cuCrealf(x) * cuCrealf(y)) + cuCrealf(d);
+    imag_res = (cuCrealf(x) * cuCimagf(y)) + cuCimagf(d);
+
+    real_res = -(cuCimagf(x) * cuCimagf(y)) + real_res;
+    imag_res = (cuCimagf(x) * cuCrealf(y)) + imag_res;
+
+    return make_cuComplex(real_res, imag_res);
+}
+
+__host__ __device__ static __inline__ cuDoubleComplex cuCfma( cuDoubleComplex x, cuDoubleComplex y, cuDoubleComplex d)
+{
+    double real_res;
+    double imag_res;
+
+    real_res = (cuCreal(x) * cuCreal(y)) + cuCreal(d);
+    imag_res = (cuCreal(x) * cuCimag(y)) + cuCimag(d);
+
+    real_res = -(cuCimag(x) * cuCimag(y)) + real_res;
+    imag_res = (cuCimag(x) * cuCreal(y)) + imag_res;
+
+    return make_cuDoubleComplex(real_res, imag_res);
+}
+
+#endif /* !defined(CU_COMPLEX_H_) */
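The cuComplex.h helpers above are plain inline functions usable from both host and device code. A minimal sketch of the float-precision API follows; it is not part of the committed file, and assumes a standard CUDA toolkit (builds with nvcc).

// Minimal usage sketch, not part of this commit. Build: nvcc sketch.cu
#include <cuComplex.h>
#include <stdio.h>

int main(void)
{
    cuFloatComplex a = make_cuFloatComplex(3.0f, 4.0f);   /* 3 + 4i */
    cuFloatComplex b = make_cuFloatComplex(1.0f, -2.0f);  /* 1 - 2i */

    cuFloatComplex p = cuCmulf(a, b);  /* unguarded product (see comment above) */
    cuFloatComplex q = cuCdivf(a, b);  /* scaled, overflow-guarded quotient */
    float          m = cuCabsf(a);     /* |a| = 5, computed without hypotf() */

    printf("p = %f%+fi  q = %f%+fi  |a| = %f\n",
           cuCrealf(p), cuCimagf(p), cuCrealf(q), cuCimagf(q), m);
    return 0;
}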
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h ADDED
The diff for this file is too large to render. See raw diff
 
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaEGLTypedefs.h ADDED
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#ifndef CUDAEGLTYPEDEFS_H
+#define CUDAEGLTYPEDEFS_H
+
+#include <cudaEGL.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif // __cplusplus
+
+/*
+ * Macros for the latest version for each driver function in cudaEGL.h
+ */
+#define PFN_cuGraphicsEGLRegisterImage  PFN_cuGraphicsEGLRegisterImage_v7000
+#define PFN_cuEGLStreamConsumerConnect  PFN_cuEGLStreamConsumerConnect_v7000
+#define PFN_cuEGLStreamConsumerConnectWithFlags  PFN_cuEGLStreamConsumerConnectWithFlags_v8000
+#define PFN_cuEGLStreamConsumerDisconnect  PFN_cuEGLStreamConsumerDisconnect_v7000
+#define PFN_cuEGLStreamConsumerAcquireFrame  PFN_cuEGLStreamConsumerAcquireFrame_v7000
+#define PFN_cuEGLStreamConsumerReleaseFrame  PFN_cuEGLStreamConsumerReleaseFrame_v7000
+#define PFN_cuEGLStreamProducerConnect  PFN_cuEGLStreamProducerConnect_v7000
+#define PFN_cuEGLStreamProducerDisconnect  PFN_cuEGLStreamProducerDisconnect_v7000
+#define PFN_cuEGLStreamProducerPresentFrame  PFN_cuEGLStreamProducerPresentFrame_v7000
+#define PFN_cuEGLStreamProducerReturnFrame  PFN_cuEGLStreamProducerReturnFrame_v7000
+#define PFN_cuGraphicsResourceGetMappedEglFrame  PFN_cuGraphicsResourceGetMappedEglFrame_v7000
+#define PFN_cuEventCreateFromEGLSync  PFN_cuEventCreateFromEGLSync_v9000
+
+
+/**
+ * Type definitions for functions defined in cudaEGL.h
+ */
+typedef CUresult (CUDAAPI *PFN_cuGraphicsEGLRegisterImage_v7000)(CUgraphicsResource CUDAAPI *pCudaResource, EGLImageKHR image, unsigned int flags);
+typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream);
+typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerConnectWithFlags_v8000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, unsigned int flags);
+typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn);
+typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerAcquireFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource CUDAAPI *pCudaResource, CUstream CUDAAPI *pStream, unsigned int timeout);
+typedef CUresult (CUDAAPI *PFN_cuEGLStreamConsumerReleaseFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUgraphicsResource pCudaResource, CUstream CUDAAPI *pStream);
+typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerConnect_v7000)(CUeglStreamConnection CUDAAPI *conn, EGLStreamKHR stream, EGLint width, EGLint height);
+typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerDisconnect_v7000)(CUeglStreamConnection CUDAAPI *conn);
+typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerPresentFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 eglframe, CUstream CUDAAPI *pStream);
+typedef CUresult (CUDAAPI *PFN_cuEGLStreamProducerReturnFrame_v7000)(CUeglStreamConnection CUDAAPI *conn, CUeglFrame_v1 CUDAAPI *eglframe, CUstream CUDAAPI *pStream);
+typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedEglFrame_v7000)(CUeglFrame_v1 CUDAAPI *eglFrame, CUgraphicsResource resource, unsigned int index, unsigned int mipLevel);
+typedef CUresult (CUDAAPI *PFN_cuEventCreateFromEGLSync_v9000)(CUevent CUDAAPI *phEvent, EGLSyncKHR eglSync, unsigned int flags);
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+
+#endif // file guard
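The PFN_* typedefs above exist so applications can resolve driver entry points at runtime instead of linking them directly. A sketch of that pattern follows; it is not part of the committed file, and it assumes a CUDA 11.3-era driver API where cuGetProcAddress takes four arguments (CUDA 12 adds a fifth status out-parameter).

// Sketch, not part of this commit; assumes the 4-argument
// cuGetProcAddress introduced in CUDA 11.3.
#include <cuda.h>
#include <cudaEGLTypedefs.h>

static PFN_cuEGLStreamConsumerConnect pfnConnect = NULL;

CUresult loadEglConsumerConnect(void)
{
    // Resolve the v7000 entry point that the macro above selects;
    // 7000 is the CUDA version that introduced this ABI.
    return cuGetProcAddress("cuEGLStreamConsumerConnect",
                            (void **)&pfnConnect,
                            7000, CU_GET_PROC_ADDRESS_DEFAULT);
}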
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaProfilerTypedefs.h ADDED
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#ifndef CUDAPROFILERTYPEDEFS_H
+#define CUDAPROFILERTYPEDEFS_H
+
+#include <cudaProfiler.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif // __cplusplus
+
+/*
+ * Macros for the latest version for each driver function in cudaProfiler.h
+ */
+#define PFN_cuProfilerInitialize  PFN_cuProfilerInitialize_v4000
+#define PFN_cuProfilerStart  PFN_cuProfilerStart_v4000
+#define PFN_cuProfilerStop  PFN_cuProfilerStop_v4000
+
+
+/**
+ * Type definitions for functions defined in cudaProfiler.h
+ */
+typedef CUresult (CUDAAPI *PFN_cuProfilerInitialize_v4000)(const char *configFile, const char *outputFile, CUoutput_mode outputMode);
+typedef CUresult (CUDAAPI *PFN_cuProfilerStart_v4000)(void);
+typedef CUresult (CUDAAPI *PFN_cuProfilerStop_v4000)(void);
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+
+#endif // file guard
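The profiler typedefs follow the same runtime-resolution idea. A sketch using plain dlopen/dlsym follows; it is not part of the committed file, and it assumes a Linux system where the driver library is available as libcuda.so.1.

// Sketch, not part of this commit; Linux-only (dlfcn), link with -ldl.
#include <dlfcn.h>
#include <cuda.h>
#include <cudaProfilerTypedefs.h>

int callProfilerStart(void)
{
    void *lib = dlopen("libcuda.so.1", RTLD_NOW);
    if (lib == NULL) return -1;

    // PFN_cuProfilerStart expands to PFN_cuProfilerStart_v4000 (see above).
    PFN_cuProfilerStart start =
        (PFN_cuProfilerStart)dlsym(lib, "cuProfilerStart");

    return (start != NULL && start() == CUDA_SUCCESS) ? 0 : -1;
}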
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaTypedefs.h ADDED
@@ -0,0 +1,994 @@
+ /*
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #ifndef CUDATYPEDEFS_H
+ #define CUDATYPEDEFS_H
+
+ #include <cuda.h>
+
+ #if defined(CUDA_API_PER_THREAD_DEFAULT_STREAM)
+ #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptds
+ #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## ptds_version ## _ptsz
+ #else
+ #define __API_TYPEDEF_PTDS(api, default_version, ptds_version) api ## _v ## default_version
+ #define __API_TYPEDEF_PTSZ(api, default_version, ptds_version) api ## _v ## default_version
+ #endif
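+
+ /*
+ * Illustrative expansion (a sketch, assuming CUDA_API_PER_THREAD_DEFAULT_STREAM
+ * is defined before this header is included):
+ *   __API_TYPEDEF_PTDS(PFN_cuMemcpy, 4000, 7000)      -> PFN_cuMemcpy_v7000_ptds
+ *   __API_TYPEDEF_PTSZ(PFN_cuMemcpyAsync, 4000, 7000) -> PFN_cuMemcpyAsync_v7000_ptsz
+ * Without it, both macros fall back to the default (legacy-stream) version:
+ *   __API_TYPEDEF_PTDS(PFN_cuMemcpy, 4000, 7000)      -> PFN_cuMemcpy_v4000
+ */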
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif // __cplusplus
+
+ /*
+ * Macros for the latest version for each driver function in cuda.h
+ */
+ #define PFN_cuGetErrorString PFN_cuGetErrorString_v6000
+ #define PFN_cuGetErrorName PFN_cuGetErrorName_v6000
+ #define PFN_cuInit PFN_cuInit_v2000
+ #define PFN_cuDriverGetVersion PFN_cuDriverGetVersion_v2020
+ #define PFN_cuDeviceGet PFN_cuDeviceGet_v2000
+ #define PFN_cuDeviceGetCount PFN_cuDeviceGetCount_v2000
+ #define PFN_cuDeviceGetName PFN_cuDeviceGetName_v2000
+ #define PFN_cuDeviceGetUuid PFN_cuDeviceGetUuid_v11040
+ #define PFN_cuDeviceGetLuid PFN_cuDeviceGetLuid_v10000
+ #define PFN_cuDeviceTotalMem PFN_cuDeviceTotalMem_v3020
+ #define PFN_cuDeviceGetTexture1DLinearMaxWidth PFN_cuDeviceGetTexture1DLinearMaxWidth_v11010
+ #define PFN_cuDeviceGetAttribute PFN_cuDeviceGetAttribute_v2000
+ #define PFN_cuDeviceGetNvSciSyncAttributes PFN_cuDeviceGetNvSciSyncAttributes_v10020
+ #define PFN_cuDeviceSetMemPool PFN_cuDeviceSetMemPool_v11020
+ #define PFN_cuDeviceGetMemPool PFN_cuDeviceGetMemPool_v11020
+ #define PFN_cuDeviceGetDefaultMemPool PFN_cuDeviceGetDefaultMemPool_v11020
+ #define PFN_cuDeviceGetProperties PFN_cuDeviceGetProperties_v2000
+ #define PFN_cuDeviceComputeCapability PFN_cuDeviceComputeCapability_v2000
+ #define PFN_cuDevicePrimaryCtxRetain PFN_cuDevicePrimaryCtxRetain_v7000
+ #define PFN_cuDevicePrimaryCtxRelease PFN_cuDevicePrimaryCtxRelease_v11000
+ #define PFN_cuDevicePrimaryCtxSetFlags PFN_cuDevicePrimaryCtxSetFlags_v11000
+ #define PFN_cuDevicePrimaryCtxGetState PFN_cuDevicePrimaryCtxGetState_v7000
+ #define PFN_cuDevicePrimaryCtxReset PFN_cuDevicePrimaryCtxReset_v11000
+ #define PFN_cuDeviceGetExecAffinitySupport PFN_cuDeviceGetExecAffinitySupport_v11040
+ #define PFN_cuCtxCreate PFN_cuCtxCreate_v11040
+ #define PFN_cuCtxDestroy PFN_cuCtxDestroy_v4000
+ #define PFN_cuCtxPushCurrent PFN_cuCtxPushCurrent_v4000
+ #define PFN_cuCtxPopCurrent PFN_cuCtxPopCurrent_v4000
+ #define PFN_cuCtxSetCurrent PFN_cuCtxSetCurrent_v4000
+ #define PFN_cuCtxGetCurrent PFN_cuCtxGetCurrent_v4000
+ #define PFN_cuCtxGetDevice PFN_cuCtxGetDevice_v2000
+ #define PFN_cuCtxGetFlags PFN_cuCtxGetFlags_v7000
+ #define PFN_cuCtxSynchronize PFN_cuCtxSynchronize_v2000
+ #define PFN_cuCtxSetLimit PFN_cuCtxSetLimit_v3010
+ #define PFN_cuCtxGetLimit PFN_cuCtxGetLimit_v3010
+ #define PFN_cuCtxGetCacheConfig PFN_cuCtxGetCacheConfig_v3020
+ #define PFN_cuCtxSetCacheConfig PFN_cuCtxSetCacheConfig_v3020
+ #define PFN_cuCtxGetSharedMemConfig PFN_cuCtxGetSharedMemConfig_v4020
+ #define PFN_cuCtxSetSharedMemConfig PFN_cuCtxSetSharedMemConfig_v4020
+ #define PFN_cuCtxGetApiVersion PFN_cuCtxGetApiVersion_v3020
+ #define PFN_cuCtxGetStreamPriorityRange PFN_cuCtxGetStreamPriorityRange_v5050
+ #define PFN_cuCtxResetPersistingL2Cache PFN_cuCtxResetPersistingL2Cache_v11000
+ #define PFN_cuCtxAttach PFN_cuCtxAttach_v2000
+ #define PFN_cuCtxDetach PFN_cuCtxDetach_v2000
+ #define PFN_cuCtxGetExecAffinity PFN_cuCtxGetExecAffinity_v11040
+ #define PFN_cuModuleLoad PFN_cuModuleLoad_v2000
+ #define PFN_cuModuleLoadData PFN_cuModuleLoadData_v2000
+ #define PFN_cuModuleLoadDataEx PFN_cuModuleLoadDataEx_v2010
+ #define PFN_cuModuleLoadFatBinary PFN_cuModuleLoadFatBinary_v2000
+ #define PFN_cuModuleUnload PFN_cuModuleUnload_v2000
+ #define PFN_cuModuleGetFunction PFN_cuModuleGetFunction_v2000
+ #define PFN_cuModuleGetGlobal PFN_cuModuleGetGlobal_v3020
+ #define PFN_cuModuleGetTexRef PFN_cuModuleGetTexRef_v2000
+ #define PFN_cuModuleGetSurfRef PFN_cuModuleGetSurfRef_v3000
+ #define PFN_cuLinkCreate PFN_cuLinkCreate_v6050
+ #define PFN_cuLinkAddData PFN_cuLinkAddData_v6050
+ #define PFN_cuLinkAddFile PFN_cuLinkAddFile_v6050
+ #define PFN_cuLinkComplete PFN_cuLinkComplete_v5050
+ #define PFN_cuLinkDestroy PFN_cuLinkDestroy_v5050
+ #define PFN_cuMemGetInfo PFN_cuMemGetInfo_v3020
+ #define PFN_cuMemAlloc PFN_cuMemAlloc_v3020
+ #define PFN_cuMemAllocPitch PFN_cuMemAllocPitch_v3020
+ #define PFN_cuMemFree PFN_cuMemFree_v3020
+ #define PFN_cuMemGetAddressRange PFN_cuMemGetAddressRange_v3020
+ #define PFN_cuMemAllocHost PFN_cuMemAllocHost_v3020
+ #define PFN_cuMemFreeHost PFN_cuMemFreeHost_v2000
+ #define PFN_cuMemHostAlloc PFN_cuMemHostAlloc_v2020
+ #define PFN_cuMemHostGetDevicePointer PFN_cuMemHostGetDevicePointer_v3020
+ #define PFN_cuMemHostGetFlags PFN_cuMemHostGetFlags_v2030
+ #define PFN_cuMemAllocManaged PFN_cuMemAllocManaged_v6000
+ #define PFN_cuDeviceGetByPCIBusId PFN_cuDeviceGetByPCIBusId_v4010
+ #define PFN_cuDeviceGetPCIBusId PFN_cuDeviceGetPCIBusId_v4010
+ #define PFN_cuIpcGetEventHandle PFN_cuIpcGetEventHandle_v4010
+ #define PFN_cuIpcOpenEventHandle PFN_cuIpcOpenEventHandle_v4010
+ #define PFN_cuIpcGetMemHandle PFN_cuIpcGetMemHandle_v4010
+ #define PFN_cuIpcOpenMemHandle PFN_cuIpcOpenMemHandle_v11000
+ #define PFN_cuIpcCloseMemHandle PFN_cuIpcCloseMemHandle_v4010
+ #define PFN_cuMemHostRegister PFN_cuMemHostRegister_v6050
+ #define PFN_cuMemHostUnregister PFN_cuMemHostUnregister_v4000
+ #define PFN_cuMemcpy __API_TYPEDEF_PTDS(PFN_cuMemcpy, 4000, 7000)
+ #define PFN_cuMemcpyPeer __API_TYPEDEF_PTDS(PFN_cuMemcpyPeer, 4000, 7000)
+ #define PFN_cuMemcpyHtoD __API_TYPEDEF_PTDS(PFN_cuMemcpyHtoD, 3020, 7000)
+ #define PFN_cuMemcpyDtoH __API_TYPEDEF_PTDS(PFN_cuMemcpyDtoH, 3020, 7000)
+ #define PFN_cuMemcpyDtoD __API_TYPEDEF_PTDS(PFN_cuMemcpyDtoD, 3020, 7000)
+ #define PFN_cuMemcpyDtoA __API_TYPEDEF_PTDS(PFN_cuMemcpyDtoA, 3020, 7000)
+ #define PFN_cuMemcpyAtoD __API_TYPEDEF_PTDS(PFN_cuMemcpyAtoD, 3020, 7000)
+ #define PFN_cuMemcpyHtoA __API_TYPEDEF_PTDS(PFN_cuMemcpyHtoA, 3020, 7000)
+ #define PFN_cuMemcpyAtoH __API_TYPEDEF_PTDS(PFN_cuMemcpyAtoH, 3020, 7000)
+ #define PFN_cuMemcpyAtoA __API_TYPEDEF_PTDS(PFN_cuMemcpyAtoA, 3020, 7000)
+ #define PFN_cuMemcpy2D __API_TYPEDEF_PTDS(PFN_cuMemcpy2D, 3020, 7000)
+ #define PFN_cuMemcpy2DUnaligned __API_TYPEDEF_PTDS(PFN_cuMemcpy2DUnaligned, 3020, 7000)
+ #define PFN_cuMemcpy3D __API_TYPEDEF_PTDS(PFN_cuMemcpy3D, 3020, 7000)
+ #define PFN_cuMemcpy3DPeer __API_TYPEDEF_PTDS(PFN_cuMemcpy3DPeer, 4000, 7000)
+ #define PFN_cuMemcpyAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyAsync, 4000, 7000)
+ #define PFN_cuMemcpyPeerAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyPeerAsync, 4000, 7000)
+ #define PFN_cuMemcpyHtoDAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyHtoDAsync, 3020, 7000)
+ #define PFN_cuMemcpyDtoHAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyDtoHAsync, 3020, 7000)
+ #define PFN_cuMemcpyDtoDAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyDtoDAsync, 3020, 7000)
+ #define PFN_cuMemcpyHtoAAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyHtoAAsync, 3020, 7000)
+ #define PFN_cuMemcpyAtoHAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpyAtoHAsync, 3020, 7000)
+ #define PFN_cuMemcpy2DAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpy2DAsync, 3020, 7000)
+ #define PFN_cuMemcpy3DAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpy3DAsync, 3020, 7000)
+ #define PFN_cuMemcpy3DPeerAsync __API_TYPEDEF_PTSZ(PFN_cuMemcpy3DPeerAsync, 4000, 7000)
+ #define PFN_cuMemsetD8 __API_TYPEDEF_PTDS(PFN_cuMemsetD8, 3020, 7000)
+ #define PFN_cuMemsetD16 __API_TYPEDEF_PTDS(PFN_cuMemsetD16, 3020, 7000)
+ #define PFN_cuMemsetD32 __API_TYPEDEF_PTDS(PFN_cuMemsetD32, 3020, 7000)
+ #define PFN_cuMemsetD2D8 __API_TYPEDEF_PTDS(PFN_cuMemsetD2D8, 3020, 7000)
+ #define PFN_cuMemsetD2D16 __API_TYPEDEF_PTDS(PFN_cuMemsetD2D16, 3020, 7000)
+ #define PFN_cuMemsetD2D32 __API_TYPEDEF_PTDS(PFN_cuMemsetD2D32, 3020, 7000)
+ #define PFN_cuMemsetD8Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD8Async, 3020, 7000)
+ #define PFN_cuMemsetD16Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD16Async, 3020, 7000)
+ #define PFN_cuMemsetD32Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD32Async, 3020, 7000)
+ #define PFN_cuMemsetD2D8Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD2D8Async, 3020, 7000)
+ #define PFN_cuMemsetD2D16Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD2D16Async, 3020, 7000)
+ #define PFN_cuMemsetD2D32Async __API_TYPEDEF_PTSZ(PFN_cuMemsetD2D32Async, 3020, 7000)
+ #define PFN_cuArrayCreate PFN_cuArrayCreate_v3020
+ #define PFN_cuArrayGetDescriptor PFN_cuArrayGetDescriptor_v3020
+ #define PFN_cuArrayGetSparseProperties PFN_cuArrayGetSparseProperties_v11010
+ #define PFN_cuMipmappedArrayGetSparseProperties PFN_cuMipmappedArrayGetSparseProperties_v11010
+
+ #define PFN_cuArrayGetMemoryRequirements PFN_cuArrayGetMemoryRequirements_v11060
+ #define PFN_cuMipmappedArrayGetMemoryRequirements PFN_cuMipmappedArrayGetMemoryRequirements_v11060
+
+ #define PFN_cuArrayGetPlane PFN_cuArrayGetPlane_v11020
+ #define PFN_cuArrayDestroy PFN_cuArrayDestroy_v2000
+ #define PFN_cuArray3DCreate PFN_cuArray3DCreate_v3020
+ #define PFN_cuArray3DGetDescriptor PFN_cuArray3DGetDescriptor_v3020
+ #define PFN_cuMipmappedArrayCreate PFN_cuMipmappedArrayCreate_v5000
+ #define PFN_cuMipmappedArrayGetLevel PFN_cuMipmappedArrayGetLevel_v5000
+ #define PFN_cuMipmappedArrayDestroy PFN_cuMipmappedArrayDestroy_v5000
+ #define PFN_cuMemAddressReserve PFN_cuMemAddressReserve_v10020
+ #define PFN_cuMemAddressFree PFN_cuMemAddressFree_v10020
+ #define PFN_cuMemCreate PFN_cuMemCreate_v10020
+ #define PFN_cuMemRelease PFN_cuMemRelease_v10020
+ #define PFN_cuMemMap PFN_cuMemMap_v10020
+ #define PFN_cuMemMapArrayAsync __API_TYPEDEF_PTSZ(PFN_cuMemMapArrayAsync, 11010, 11010)
+ #define PFN_cuMemUnmap PFN_cuMemUnmap_v10020
+ #define PFN_cuMemSetAccess PFN_cuMemSetAccess_v10020
+ #define PFN_cuMemGetAccess PFN_cuMemGetAccess_v10020
+ #define PFN_cuMemExportToShareableHandle PFN_cuMemExportToShareableHandle_v10020
+ #define PFN_cuMemImportFromShareableHandle PFN_cuMemImportFromShareableHandle_v10020
+ #define PFN_cuMemGetAllocationGranularity PFN_cuMemGetAllocationGranularity_v10020
+ #define PFN_cuMemGetAllocationPropertiesFromHandle PFN_cuMemGetAllocationPropertiesFromHandle_v10020
+ #define PFN_cuMemRetainAllocationHandle PFN_cuMemRetainAllocationHandle_v11000
+ #define PFN_cuMemFreeAsync __API_TYPEDEF_PTSZ(PFN_cuMemFreeAsync, 11020, 11020)
+ #define PFN_cuMemAllocAsync __API_TYPEDEF_PTSZ(PFN_cuMemAllocAsync, 11020, 11020)
+ #define PFN_cuMemPoolTrimTo PFN_cuMemPoolTrimTo_v11020
+ #define PFN_cuMemPoolSetAttribute PFN_cuMemPoolSetAttribute_v11020
+ #define PFN_cuMemPoolGetAttribute PFN_cuMemPoolGetAttribute_v11020
+ #define PFN_cuMemPoolSetAccess PFN_cuMemPoolSetAccess_v11020
+ #define PFN_cuMemPoolGetAccess PFN_cuMemPoolGetAccess_v11020
+ #define PFN_cuMemPoolCreate PFN_cuMemPoolCreate_v11020
+ #define PFN_cuMemPoolDestroy PFN_cuMemPoolDestroy_v11020
+ #define PFN_cuMemAllocFromPoolAsync __API_TYPEDEF_PTSZ(PFN_cuMemAllocFromPoolAsync, 11020, 11020)
+ #define PFN_cuMemPoolExportToShareableHandle PFN_cuMemPoolExportToShareableHandle_v11020
+ #define PFN_cuMemPoolImportFromShareableHandle PFN_cuMemPoolImportFromShareableHandle_v11020
+ #define PFN_cuMemPoolExportPointer PFN_cuMemPoolExportPointer_v11020
+ #define PFN_cuMemPoolImportPointer PFN_cuMemPoolImportPointer_v11020
+ #define PFN_cuPointerGetAttribute PFN_cuPointerGetAttribute_v4000
+ #define PFN_cuMemPrefetchAsync __API_TYPEDEF_PTSZ(PFN_cuMemPrefetchAsync, 8000, 8000)
+ #define PFN_cuMemAdvise PFN_cuMemAdvise_v8000
+ #define PFN_cuMemRangeGetAttribute PFN_cuMemRangeGetAttribute_v8000
+ #define PFN_cuMemRangeGetAttributes PFN_cuMemRangeGetAttributes_v8000
+ #define PFN_cuPointerSetAttribute PFN_cuPointerSetAttribute_v6000
+ #define PFN_cuPointerGetAttributes PFN_cuPointerGetAttributes_v7000
+ #define PFN_cuStreamCreate PFN_cuStreamCreate_v2000
+ #define PFN_cuStreamCreateWithPriority PFN_cuStreamCreateWithPriority_v5050
+ #define PFN_cuStreamGetPriority __API_TYPEDEF_PTSZ(PFN_cuStreamGetPriority, 5050, 7000)
+ #define PFN_cuStreamGetFlags __API_TYPEDEF_PTSZ(PFN_cuStreamGetFlags, 5050, 7000)
+ #define PFN_cuStreamGetCtx __API_TYPEDEF_PTSZ(PFN_cuStreamGetCtx, 9020, 9020)
+ #define PFN_cuStreamWaitEvent __API_TYPEDEF_PTSZ(PFN_cuStreamWaitEvent, 3020, 7000)
+ #define PFN_cuStreamAddCallback __API_TYPEDEF_PTSZ(PFN_cuStreamAddCallback, 5000, 7000)
+ #define PFN_cuStreamBeginCapture __API_TYPEDEF_PTSZ(PFN_cuStreamBeginCapture, 10010, 10010)
+ #define PFN_cuThreadExchangeStreamCaptureMode PFN_cuThreadExchangeStreamCaptureMode_v10010
+ #define PFN_cuStreamEndCapture __API_TYPEDEF_PTSZ(PFN_cuStreamEndCapture, 10000, 10000)
+ #define PFN_cuStreamIsCapturing __API_TYPEDEF_PTSZ(PFN_cuStreamIsCapturing, 10000, 10000)
+ #define PFN_cuStreamGetCaptureInfo __API_TYPEDEF_PTSZ(PFN_cuStreamGetCaptureInfo, 10010, 10010)
+ #define PFN_cuStreamGetCaptureInfo_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamGetCaptureInfo, 11030, 11030)
+ #define PFN_cuStreamUpdateCaptureDependencies __API_TYPEDEF_PTSZ(PFN_cuStreamUpdateCaptureDependencies, 11030, 11030)
+ #define PFN_cuStreamAttachMemAsync __API_TYPEDEF_PTSZ(PFN_cuStreamAttachMemAsync, 6000, 7000)
+ #define PFN_cuStreamQuery __API_TYPEDEF_PTSZ(PFN_cuStreamQuery, 2000, 7000)
+ #define PFN_cuStreamSynchronize __API_TYPEDEF_PTSZ(PFN_cuStreamSynchronize, 2000, 7000)
+ #define PFN_cuStreamDestroy PFN_cuStreamDestroy_v4000
+ #define PFN_cuStreamCopyAttributes __API_TYPEDEF_PTSZ(PFN_cuStreamCopyAttributes, 11000, 11000)
+ #define PFN_cuStreamGetAttribute __API_TYPEDEF_PTSZ(PFN_cuStreamGetAttribute, 11000, 11000)
+ #define PFN_cuStreamSetAttribute __API_TYPEDEF_PTSZ(PFN_cuStreamSetAttribute, 11000, 11000)
+ #define PFN_cuEventCreate PFN_cuEventCreate_v2000
+ #define PFN_cuEventRecord __API_TYPEDEF_PTSZ(PFN_cuEventRecord, 2000, 7000)
+ #define PFN_cuEventRecordWithFlags __API_TYPEDEF_PTSZ(PFN_cuEventRecordWithFlags, 11010, 11010)
+ #define PFN_cuEventQuery PFN_cuEventQuery_v2000
+ #define PFN_cuEventSynchronize PFN_cuEventSynchronize_v2000
+ #define PFN_cuEventDestroy PFN_cuEventDestroy_v4000
+ #define PFN_cuEventElapsedTime PFN_cuEventElapsedTime_v2000
+ #define PFN_cuImportExternalMemory PFN_cuImportExternalMemory_v10000
+ #define PFN_cuExternalMemoryGetMappedBuffer PFN_cuExternalMemoryGetMappedBuffer_v10000
+ #define PFN_cuExternalMemoryGetMappedMipmappedArray PFN_cuExternalMemoryGetMappedMipmappedArray_v10000
+ #define PFN_cuDestroyExternalMemory PFN_cuDestroyExternalMemory_v10000
+ #define PFN_cuImportExternalSemaphore PFN_cuImportExternalSemaphore_v10000
+ #define PFN_cuSignalExternalSemaphoresAsync __API_TYPEDEF_PTSZ(PFN_cuSignalExternalSemaphoresAsync, 10000, 10000)
+ #define PFN_cuWaitExternalSemaphoresAsync __API_TYPEDEF_PTSZ(PFN_cuWaitExternalSemaphoresAsync, 10000, 10000)
+ #define PFN_cuDestroyExternalSemaphore PFN_cuDestroyExternalSemaphore_v10000
+ #define PFN_cuStreamWaitValue32 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue32, 8000, 8000)
+ #define PFN_cuStreamWaitValue64 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue64, 9000, 9000)
+ #define PFN_cuStreamWriteValue32 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue32, 8000, 8000)
+ #define PFN_cuStreamWriteValue64 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue64, 9000, 9000)
+ #define PFN_cuStreamBatchMemOp __API_TYPEDEF_PTSZ(PFN_cuStreamBatchMemOp, 8000, 8000)
+
+ #define PFN_cuStreamWaitValue32_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue32, 11070, 11070)
+ #define PFN_cuStreamWaitValue64_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWaitValue64, 11070, 11070)
+ #define PFN_cuStreamWriteValue32_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue32, 11070, 11070)
+ #define PFN_cuStreamWriteValue64_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamWriteValue64, 11070, 11070)
+ #define PFN_cuStreamBatchMemOp_v2 __API_TYPEDEF_PTSZ(PFN_cuStreamBatchMemOp, 11070, 11070)
+
+ #define PFN_cuFuncGetAttribute PFN_cuFuncGetAttribute_v2020
+ #define PFN_cuFuncSetAttribute PFN_cuFuncSetAttribute_v9000
+ #define PFN_cuFuncSetCacheConfig PFN_cuFuncSetCacheConfig_v3000
+ #define PFN_cuFuncSetSharedMemConfig PFN_cuFuncSetSharedMemConfig_v4020
+ #define PFN_cuLaunchKernel __API_TYPEDEF_PTSZ(PFN_cuLaunchKernel, 4000, 7000)
+
+
+
+ #define PFN_cuLaunchCooperativeKernel __API_TYPEDEF_PTSZ(PFN_cuLaunchCooperativeKernel, 9000, 9000)
+ #define PFN_cuLaunchCooperativeKernelMultiDevice PFN_cuLaunchCooperativeKernelMultiDevice_v9000
+ #define PFN_cuLaunchHostFunc __API_TYPEDEF_PTSZ(PFN_cuLaunchHostFunc, 10000, 10000)
+ #define PFN_cuFuncSetBlockShape PFN_cuFuncSetBlockShape_v2000
+ #define PFN_cuFuncSetSharedSize PFN_cuFuncSetSharedSize_v2000
+ #define PFN_cuParamSetSize PFN_cuParamSetSize_v2000
+ #define PFN_cuParamSeti PFN_cuParamSeti_v2000
+ #define PFN_cuParamSetf PFN_cuParamSetf_v2000
+ #define PFN_cuParamSetv PFN_cuParamSetv_v2000
+ #define PFN_cuLaunch PFN_cuLaunch_v2000
+ #define PFN_cuLaunchGrid PFN_cuLaunchGrid_v2000
+ #define PFN_cuLaunchGridAsync PFN_cuLaunchGridAsync_v2000
+ #define PFN_cuParamSetTexRef PFN_cuParamSetTexRef_v2000
+ #define PFN_cuGraphCreate PFN_cuGraphCreate_v10000
+ #define PFN_cuGraphAddKernelNode PFN_cuGraphAddKernelNode_v10000
+ #define PFN_cuGraphKernelNodeGetParams PFN_cuGraphKernelNodeGetParams_v10000
+ #define PFN_cuGraphKernelNodeSetParams PFN_cuGraphKernelNodeSetParams_v10000
+ #define PFN_cuGraphAddMemcpyNode PFN_cuGraphAddMemcpyNode_v10000
+ #define PFN_cuGraphMemcpyNodeGetParams PFN_cuGraphMemcpyNodeGetParams_v10000
+ #define PFN_cuGraphMemcpyNodeSetParams PFN_cuGraphMemcpyNodeSetParams_v10000
+ #define PFN_cuGraphAddMemsetNode PFN_cuGraphAddMemsetNode_v10000
+ #define PFN_cuGraphMemsetNodeGetParams PFN_cuGraphMemsetNodeGetParams_v10000
+ #define PFN_cuGraphMemsetNodeSetParams PFN_cuGraphMemsetNodeSetParams_v10000
+ #define PFN_cuGraphAddHostNode PFN_cuGraphAddHostNode_v10000
+ #define PFN_cuGraphHostNodeGetParams PFN_cuGraphHostNodeGetParams_v10000
+ #define PFN_cuGraphHostNodeSetParams PFN_cuGraphHostNodeSetParams_v10000
+ #define PFN_cuGraphAddChildGraphNode PFN_cuGraphAddChildGraphNode_v10000
+ #define PFN_cuGraphChildGraphNodeGetGraph PFN_cuGraphChildGraphNodeGetGraph_v10000
+ #define PFN_cuGraphAddEmptyNode PFN_cuGraphAddEmptyNode_v10000
+ #define PFN_cuGraphAddEventRecordNode PFN_cuGraphAddEventRecordNode_v11010
+ #define PFN_cuGraphEventRecordNodeGetEvent PFN_cuGraphEventRecordNodeGetEvent_v11010
+ #define PFN_cuGraphEventRecordNodeSetEvent PFN_cuGraphEventRecordNodeSetEvent_v11010
+ #define PFN_cuGraphAddEventWaitNode PFN_cuGraphAddEventWaitNode_v11010
+ #define PFN_cuGraphEventWaitNodeGetEvent PFN_cuGraphEventWaitNodeGetEvent_v11010
+ #define PFN_cuGraphEventWaitNodeSetEvent PFN_cuGraphEventWaitNodeSetEvent_v11010
+ #define PFN_cuGraphAddExternalSemaphoresSignalNode PFN_cuGraphAddExternalSemaphoresSignalNode_v11020
+ #define PFN_cuGraphExternalSemaphoresSignalNodeGetParams PFN_cuGraphExternalSemaphoresSignalNodeGetParams_v11020
+ #define PFN_cuGraphExternalSemaphoresSignalNodeSetParams PFN_cuGraphExternalSemaphoresSignalNodeSetParams_v11020
+ #define PFN_cuGraphAddExternalSemaphoresWaitNode PFN_cuGraphAddExternalSemaphoresWaitNode_v11020
+ #define PFN_cuGraphExternalSemaphoresWaitNodeGetParams PFN_cuGraphExternalSemaphoresWaitNodeGetParams_v11020
+ #define PFN_cuGraphExternalSemaphoresWaitNodeSetParams PFN_cuGraphExternalSemaphoresWaitNodeSetParams_v11020
+
+ #define PFN_cuGraphAddBatchMemOpNode PFN_cuGraphAddBatchMemOpNode_v11070
+ #define PFN_cuGraphBatchMemOpNodeGetParams PFN_cuGraphBatchMemOpNodeGetParams_v11070
+ #define PFN_cuGraphBatchMemOpNodeSetParams PFN_cuGraphBatchMemOpNodeSetParams_v11070
+ #define PFN_cuGraphExecBatchMemOpNodeSetParams PFN_cuGraphExecBatchMemOpNodeSetParams_v11070
+
+ #define PFN_cuGraphClone PFN_cuGraphClone_v10000
+ #define PFN_cuGraphNodeFindInClone PFN_cuGraphNodeFindInClone_v10000
+ #define PFN_cuGraphNodeGetType PFN_cuGraphNodeGetType_v10000
+ #define PFN_cuGraphGetNodes PFN_cuGraphGetNodes_v10000
+ #define PFN_cuGraphGetRootNodes PFN_cuGraphGetRootNodes_v10000
+ #define PFN_cuGraphGetEdges PFN_cuGraphGetEdges_v10000
+ #define PFN_cuGraphNodeGetDependencies PFN_cuGraphNodeGetDependencies_v10000
+ #define PFN_cuGraphNodeGetDependentNodes PFN_cuGraphNodeGetDependentNodes_v10000
+ #define PFN_cuGraphAddDependencies PFN_cuGraphAddDependencies_v10000
+ #define PFN_cuGraphRemoveDependencies PFN_cuGraphRemoveDependencies_v10000
+ #define PFN_cuGraphDestroyNode PFN_cuGraphDestroyNode_v10000
+ #define PFN_cuGraphInstantiate PFN_cuGraphInstantiate_v11000
+ #define PFN_cuGraphInstantiateWithFlags PFN_cuGraphInstantiateWithFlags_v11040
+
+
+
+
+ #define PFN_cuGraphExecKernelNodeSetParams PFN_cuGraphExecKernelNodeSetParams_v10010
+ #define PFN_cuGraphExecMemcpyNodeSetParams PFN_cuGraphExecMemcpyNodeSetParams_v10020
+ #define PFN_cuGraphExecMemsetNodeSetParams PFN_cuGraphExecMemsetNodeSetParams_v10020
+ #define PFN_cuGraphExecHostNodeSetParams PFN_cuGraphExecHostNodeSetParams_v10020
+ #define PFN_cuGraphExecChildGraphNodeSetParams PFN_cuGraphExecChildGraphNodeSetParams_v11010
+ #define PFN_cuGraphExecEventRecordNodeSetEvent PFN_cuGraphExecEventRecordNodeSetEvent_v11010
+ #define PFN_cuGraphExecEventWaitNodeSetEvent PFN_cuGraphExecEventWaitNodeSetEvent_v11010
+ #define PFN_cuGraphExecExternalSemaphoresSignalNodeSetParams PFN_cuGraphExecExternalSemaphoresSignalNodeSetParams_v11020
+ #define PFN_cuGraphExecExternalSemaphoresWaitNodeSetParams PFN_cuGraphExecExternalSemaphoresWaitNodeSetParams_v11020
+ #define PFN_cuGraphUpload __API_TYPEDEF_PTSZ(PFN_cuGraphUpload, 11010, 11010)
+ #define PFN_cuGraphLaunch __API_TYPEDEF_PTSZ(PFN_cuGraphLaunch, 10000, 10000)
+ #define PFN_cuGraphExecDestroy PFN_cuGraphExecDestroy_v10000
+ #define PFN_cuGraphDestroy PFN_cuGraphDestroy_v10000
+ #define PFN_cuGraphExecUpdate PFN_cuGraphExecUpdate_v10020
+ #define PFN_cuGraphKernelNodeCopyAttributes PFN_cuGraphKernelNodeCopyAttributes_v11000
+ #define PFN_cuGraphKernelNodeGetAttribute PFN_cuGraphKernelNodeGetAttribute_v11000
+ #define PFN_cuGraphKernelNodeSetAttribute PFN_cuGraphKernelNodeSetAttribute_v11000
+ #define PFN_cuGraphDebugDotPrint PFN_cuGraphDebugDotPrint_v11030
+ #define PFN_cuGraphAddMemAllocNode PFN_cuGraphAddMemAllocNode_v11040
+ #define PFN_cuGraphMemAllocNodeGetParams PFN_cuGraphMemAllocNodeGetParams_v11040
+ #define PFN_cuGraphAddMemFreeNode PFN_cuGraphAddMemFreeNode_v11040
+ #define PFN_cuGraphMemFreeNodeGetParams PFN_cuGraphMemFreeNodeGetParams_v11040
+ #define PFN_cuGraphNodeSetEnabled PFN_cuGraphNodeSetEnabled_v11060
+ #define PFN_cuGraphNodeGetEnabled PFN_cuGraphNodeGetEnabled_v11060
+ #define PFN_cuDeviceGraphMemTrim PFN_cuDeviceGraphMemTrim_v11040
+ #define PFN_cuDeviceGetGraphMemAttribute PFN_cuDeviceGetGraphMemAttribute_v11040
+ #define PFN_cuDeviceSetGraphMemAttribute PFN_cuDeviceSetGraphMemAttribute_v11040
+ #define PFN_cuOccupancyMaxActiveBlocksPerMultiprocessor PFN_cuOccupancyMaxActiveBlocksPerMultiprocessor_v6050
+ #define PFN_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags PFN_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000
+ #define PFN_cuOccupancyMaxPotentialBlockSize PFN_cuOccupancyMaxPotentialBlockSize_v6050
+ #define PFN_cuOccupancyMaxPotentialBlockSizeWithFlags PFN_cuOccupancyMaxPotentialBlockSizeWithFlags_v7000
+ #define PFN_cuOccupancyAvailableDynamicSMemPerBlock PFN_cuOccupancyAvailableDynamicSMemPerBlock_v10020
+ #define PFN_cuTexRefSetArray PFN_cuTexRefSetArray_v2000
+ #define PFN_cuTexRefSetMipmappedArray PFN_cuTexRefSetMipmappedArray_v5000
+ #define PFN_cuTexRefSetAddress PFN_cuTexRefSetAddress_v3020
+ #define PFN_cuTexRefSetAddress2D PFN_cuTexRefSetAddress2D_v4010
+ #define PFN_cuTexRefSetFormat PFN_cuTexRefSetFormat_v2000
+ #define PFN_cuTexRefSetAddressMode PFN_cuTexRefSetAddressMode_v2000
+ #define PFN_cuTexRefSetFilterMode PFN_cuTexRefSetFilterMode_v2000
+ #define PFN_cuTexRefSetMipmapFilterMode PFN_cuTexRefSetMipmapFilterMode_v5000
+ #define PFN_cuTexRefSetMipmapLevelBias PFN_cuTexRefSetMipmapLevelBias_v5000
+ #define PFN_cuTexRefSetMipmapLevelClamp PFN_cuTexRefSetMipmapLevelClamp_v5000
+ #define PFN_cuTexRefSetMaxAnisotropy PFN_cuTexRefSetMaxAnisotropy_v5000
+ #define PFN_cuTexRefSetBorderColor PFN_cuTexRefSetBorderColor_v8000
+ #define PFN_cuTexRefSetFlags PFN_cuTexRefSetFlags_v2000
+ #define PFN_cuTexRefGetAddress PFN_cuTexRefGetAddress_v3020
+ #define PFN_cuTexRefGetArray PFN_cuTexRefGetArray_v2000
+ #define PFN_cuTexRefGetMipmappedArray PFN_cuTexRefGetMipmappedArray_v5000
+ #define PFN_cuTexRefGetAddressMode PFN_cuTexRefGetAddressMode_v2000
+ #define PFN_cuTexRefGetFilterMode PFN_cuTexRefGetFilterMode_v2000
+ #define PFN_cuTexRefGetFormat PFN_cuTexRefGetFormat_v2000
+ #define PFN_cuTexRefGetMipmapFilterMode PFN_cuTexRefGetMipmapFilterMode_v5000
+ #define PFN_cuTexRefGetMipmapLevelBias PFN_cuTexRefGetMipmapLevelBias_v5000
+ #define PFN_cuTexRefGetMipmapLevelClamp PFN_cuTexRefGetMipmapLevelClamp_v5000
+ #define PFN_cuTexRefGetMaxAnisotropy PFN_cuTexRefGetMaxAnisotropy_v5000
+ #define PFN_cuTexRefGetBorderColor PFN_cuTexRefGetBorderColor_v8000
+ #define PFN_cuTexRefGetFlags PFN_cuTexRefGetFlags_v2000
+ #define PFN_cuTexRefCreate PFN_cuTexRefCreate_v2000
+ #define PFN_cuTexRefDestroy PFN_cuTexRefDestroy_v2000
+ #define PFN_cuSurfRefSetArray PFN_cuSurfRefSetArray_v3000
+ #define PFN_cuSurfRefGetArray PFN_cuSurfRefGetArray_v3000
+ #define PFN_cuTexObjectCreate PFN_cuTexObjectCreate_v5000
+ #define PFN_cuTexObjectDestroy PFN_cuTexObjectDestroy_v5000
+ #define PFN_cuTexObjectGetResourceDesc PFN_cuTexObjectGetResourceDesc_v5000
+ #define PFN_cuTexObjectGetTextureDesc PFN_cuTexObjectGetTextureDesc_v5000
+ #define PFN_cuTexObjectGetResourceViewDesc PFN_cuTexObjectGetResourceViewDesc_v5000
+ #define PFN_cuSurfObjectCreate PFN_cuSurfObjectCreate_v5000
+ #define PFN_cuSurfObjectDestroy PFN_cuSurfObjectDestroy_v5000
+ #define PFN_cuSurfObjectGetResourceDesc PFN_cuSurfObjectGetResourceDesc_v5000
+ #define PFN_cuDeviceCanAccessPeer PFN_cuDeviceCanAccessPeer_v4000
+ #define PFN_cuCtxEnablePeerAccess PFN_cuCtxEnablePeerAccess_v4000
+ #define PFN_cuCtxDisablePeerAccess PFN_cuCtxDisablePeerAccess_v4000
+ #define PFN_cuDeviceGetP2PAttribute PFN_cuDeviceGetP2PAttribute_v8000
+ #define PFN_cuGraphicsUnregisterResource PFN_cuGraphicsUnregisterResource_v3000
+ #define PFN_cuGraphicsSubResourceGetMappedArray PFN_cuGraphicsSubResourceGetMappedArray_v3000
+ #define PFN_cuGraphicsResourceGetMappedMipmappedArray PFN_cuGraphicsResourceGetMappedMipmappedArray_v5000
+ #define PFN_cuGraphicsResourceGetMappedPointer PFN_cuGraphicsResourceGetMappedPointer_v3020
+ #define PFN_cuGraphicsResourceSetMapFlags PFN_cuGraphicsResourceSetMapFlags_v6050
+ #define PFN_cuGraphicsMapResources __API_TYPEDEF_PTSZ(PFN_cuGraphicsMapResources, 3000, 7000)
+ #define PFN_cuGraphicsUnmapResources __API_TYPEDEF_PTSZ(PFN_cuGraphicsUnmapResources, 3000, 7000)
+ #define PFN_cuGetExportTable PFN_cuGetExportTable_v3000
+ #define PFN_cuFuncGetModule PFN_cuFuncGetModule_v11000
+ #define PFN_cuFlushGPUDirectRDMAWrites PFN_cuFlushGPUDirectRDMAWrites_v11030
+ #define PFN_cuGetProcAddress PFN_cuGetProcAddress_v11030
+ #define PFN_cuUserObjectCreate PFN_cuUserObjectCreate_v11030
+ #define PFN_cuUserObjectRetain PFN_cuUserObjectRetain_v11030
+ #define PFN_cuUserObjectRelease PFN_cuUserObjectRelease_v11030
+ #define PFN_cuGraphRetainUserObject PFN_cuGraphRetainUserObject_v11030
+ #define PFN_cuGraphReleaseUserObject PFN_cuGraphReleaseUserObject_v11030
+
+ #define PFN_cuModuleGetLoadingMode PFN_cuModuleGetLoadingMode_v11070
+
+
+ #define PFN_cuMemGetHandleForAddressRange PFN_cuMemGetHandleForAddressRange_v11070
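+
+ /*
+ * Illustrative usage sketch (the variable names and error handling are
+ * assumptions, not part of the original header): each PFN_* macro above
+ * names the latest versioned typedef, which pairs with cuGetProcAddress
+ * to load the matching driver entry point at run time:
+ *
+ *   PFN_cuMemAlloc pfnMemAlloc = NULL;
+ *   CUdeviceptr dptr = 0;
+ *   cuGetProcAddress("cuMemAlloc", (void **)&pfnMemAlloc,
+ *                    CUDA_VERSION, CU_GET_PROC_ADDRESS_DEFAULT);
+ *   if (pfnMemAlloc) pfnMemAlloc(&dptr, 1 << 20);  // allocate 1 MiB
+ */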
+
+
+ /*
+ * Type definitions for functions defined in cuda.h
+ */
+ typedef CUresult (CUDAAPI *PFN_cuGetErrorString_v6000)(CUresult error, const char **pStr);
+ typedef CUresult (CUDAAPI *PFN_cuGetErrorName_v6000)(CUresult error, const char **pStr);
+ typedef CUresult (CUDAAPI *PFN_cuInit_v2000)(unsigned int Flags);
+ typedef CUresult (CUDAAPI *PFN_cuDriverGetVersion_v2020)(int *driverVersion);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGet_v2000)(CUdevice_v1 *device, int ordinal);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetCount_v2000)(int *count);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetName_v2000)(char *name, int len, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetUuid_v9020)(CUuuid *uuid, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetUuid_v11040)(CUuuid *uuid, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetLuid_v10000)(char *luid, unsigned int *deviceNodeMask, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceTotalMem_v3020)(size_t *bytes, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetTexture1DLinearMaxWidth_v11010)(size_t *maxWidthInElements, CUarray_format format, unsigned numChannels, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetAttribute_v2000)(int *pi, CUdevice_attribute attrib, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetNvSciSyncAttributes_v10020)(void *nvSciSyncAttrList, CUdevice_v1 dev, int flags);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceSetMemPool_v11020)(CUdevice_v1 dev, CUmemoryPool pool);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetMemPool_v11020)(CUmemoryPool *pool, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetDefaultMemPool_v11020)(CUmemoryPool *pool_out, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetProperties_v2000)(CUdevprop_v1 *prop, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceComputeCapability_v2000)(int *major, int *minor, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxRetain_v7000)(CUcontext *pctx, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxRelease_v11000)(CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxSetFlags_v11000)(CUdevice_v1 dev, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxGetState_v7000)(CUdevice_v1 dev, unsigned int *flags, int *active);
+ typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxReset_v11000)(CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetExecAffinitySupport_v11040)(int *pi, CUexecAffinityType type, CUdevice dev);
+ typedef CUresult (CUDAAPI *PFN_cuCtxCreate_v3020)(CUcontext *pctx, unsigned int flags, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuCtxCreate_v11040)(CUcontext *pctx, CUexecAffinityParam *paramsArray, int numParams, unsigned int flags, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuCtxDestroy_v4000)(CUcontext ctx);
+ typedef CUresult (CUDAAPI *PFN_cuCtxPushCurrent_v4000)(CUcontext ctx);
+ typedef CUresult (CUDAAPI *PFN_cuCtxPopCurrent_v4000)(CUcontext *pctx);
+ typedef CUresult (CUDAAPI *PFN_cuCtxSetCurrent_v4000)(CUcontext ctx);
+ typedef CUresult (CUDAAPI *PFN_cuCtxGetCurrent_v4000)(CUcontext *pctx);
+ typedef CUresult (CUDAAPI *PFN_cuCtxGetDevice_v2000)(CUdevice_v1 *device);
+ typedef CUresult (CUDAAPI *PFN_cuCtxGetFlags_v7000)(unsigned int *flags);
+ typedef CUresult (CUDAAPI *PFN_cuCtxSynchronize_v2000)(void);
+ typedef CUresult (CUDAAPI *PFN_cuCtxSetLimit_v3010)(CUlimit limit, size_t value);
+ typedef CUresult (CUDAAPI *PFN_cuCtxGetLimit_v3010)(size_t *pvalue, CUlimit limit);
+ typedef CUresult (CUDAAPI *PFN_cuCtxGetCacheConfig_v3020)(CUfunc_cache *pconfig);
+ typedef CUresult (CUDAAPI *PFN_cuCtxSetCacheConfig_v3020)(CUfunc_cache config);
+ typedef CUresult (CUDAAPI *PFN_cuCtxGetSharedMemConfig_v4020)(CUsharedconfig *pConfig);
+ typedef CUresult (CUDAAPI *PFN_cuCtxSetSharedMemConfig_v4020)(CUsharedconfig config);
+ typedef CUresult (CUDAAPI *PFN_cuCtxGetApiVersion_v3020)(CUcontext ctx, unsigned int *version);
+ typedef CUresult (CUDAAPI *PFN_cuCtxGetStreamPriorityRange_v5050)(int *leastPriority, int *greatestPriority);
+ typedef CUresult (CUDAAPI *PFN_cuCtxResetPersistingL2Cache_v11000)(void);
+ typedef CUresult (CUDAAPI *PFN_cuCtxAttach_v2000)(CUcontext *pctx, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuCtxDetach_v2000)(CUcontext ctx);
+ typedef CUresult (CUDAAPI *PFN_cuCtxGetExecAffinity_v11040)(CUexecAffinityParam *pExecAffinity, CUexecAffinityType type);
+ typedef CUresult (CUDAAPI *PFN_cuModuleLoad_v2000)(CUmodule *module, const char *fname);
+ typedef CUresult (CUDAAPI *PFN_cuModuleLoadData_v2000)(CUmodule *module, const void *image);
+ typedef CUresult (CUDAAPI *PFN_cuModuleLoadDataEx_v2010)(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues);
+ typedef CUresult (CUDAAPI *PFN_cuModuleLoadFatBinary_v2000)(CUmodule *module, const void *fatCubin);
+ typedef CUresult (CUDAAPI *PFN_cuModuleUnload_v2000)(CUmodule hmod);
+ typedef CUresult (CUDAAPI *PFN_cuModuleGetFunction_v2000)(CUfunction *hfunc, CUmodule hmod, const char *name);
+ typedef CUresult (CUDAAPI *PFN_cuModuleGetGlobal_v3020)(CUdeviceptr_v2 *dptr, size_t *bytes, CUmodule hmod, const char *name);
+ typedef CUresult (CUDAAPI *PFN_cuModuleGetTexRef_v2000)(CUtexref *pTexRef, CUmodule hmod, const char *name);
+ typedef CUresult (CUDAAPI *PFN_cuModuleGetSurfRef_v3000)(CUsurfref *pSurfRef, CUmodule hmod, const char *name);
+ typedef CUresult (CUDAAPI *PFN_cuLinkCreate_v6050)(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut);
+ typedef CUresult (CUDAAPI *PFN_cuLinkAddData_v6050)(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, unsigned int numOptions, CUjit_option *options, void **optionValues);
+ typedef CUresult (CUDAAPI *PFN_cuLinkAddFile_v6050)(CUlinkState state, CUjitInputType type, const char *path, unsigned int numOptions, CUjit_option *options, void **optionValues);
+ typedef CUresult (CUDAAPI *PFN_cuLinkComplete_v5050)(CUlinkState state, void **cubinOut, size_t *sizeOut);
+ typedef CUresult (CUDAAPI *PFN_cuLinkDestroy_v5050)(CUlinkState state);
+ typedef CUresult (CUDAAPI *PFN_cuMemGetInfo_v3020)(size_t *free, size_t *total);
+ typedef CUresult (CUDAAPI *PFN_cuMemAlloc_v3020)(CUdeviceptr_v2 *dptr, size_t bytesize);
+ typedef CUresult (CUDAAPI *PFN_cuMemAllocPitch_v3020)(CUdeviceptr_v2 *dptr, size_t *pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes);
+ typedef CUresult (CUDAAPI *PFN_cuMemFree_v3020)(CUdeviceptr_v2 dptr);
+ typedef CUresult (CUDAAPI *PFN_cuMemGetAddressRange_v3020)(CUdeviceptr_v2 *pbase, size_t *psize, CUdeviceptr_v2 dptr);
+ typedef CUresult (CUDAAPI *PFN_cuMemAllocHost_v3020)(void **pp, size_t bytesize);
+ typedef CUresult (CUDAAPI *PFN_cuMemFreeHost_v2000)(void *p);
+ typedef CUresult (CUDAAPI *PFN_cuMemHostAlloc_v2020)(void **pp, size_t bytesize, unsigned int Flags);
+ typedef CUresult (CUDAAPI *PFN_cuMemHostGetDevicePointer_v3020)(CUdeviceptr_v2 *pdptr, void *p, unsigned int Flags);
+ typedef CUresult (CUDAAPI *PFN_cuMemHostGetFlags_v2030)(unsigned int *pFlags, void *p);
+ typedef CUresult (CUDAAPI *PFN_cuMemAllocManaged_v6000)(CUdeviceptr_v2 *dptr, size_t bytesize, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetByPCIBusId_v4010)(CUdevice_v1 *dev, const char *pciBusId);
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetPCIBusId_v4010)(char *pciBusId, int len, CUdevice_v1 dev);
+ typedef CUresult (CUDAAPI *PFN_cuIpcGetEventHandle_v4010)(CUipcEventHandle_v1 *pHandle, CUevent event);
+ typedef CUresult (CUDAAPI *PFN_cuIpcOpenEventHandle_v4010)(CUevent *phEvent, CUipcEventHandle_v1 handle);
+ typedef CUresult (CUDAAPI *PFN_cuIpcGetMemHandle_v4010)(CUipcMemHandle_v1 *pHandle, CUdeviceptr_v2 dptr);
+ typedef CUresult (CUDAAPI *PFN_cuIpcOpenMemHandle_v11000)(CUdeviceptr_v2 *pdptr, CUipcMemHandle_v1 handle, unsigned int Flags);
+ typedef CUresult (CUDAAPI *PFN_cuIpcCloseMemHandle_v4010)(CUdeviceptr_v2 dptr);
+ typedef CUresult (CUDAAPI *PFN_cuMemHostRegister_v6050)(void *p, size_t bytesize, unsigned int Flags);
+ typedef CUresult (CUDAAPI *PFN_cuMemHostUnregister_v4000)(void *p);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy_v7000_ptds)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyPeer_v7000_ptds)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoD_v7000_ptds)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoH_v7000_ptds)(void *dstHost, CUdeviceptr_v2 srcDevice, size_t ByteCount);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoD_v7000_ptds)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoA_v7000_ptds)(CUarray dstArray, size_t dstOffset, CUdeviceptr_v2 srcDevice, size_t ByteCount);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoD_v7000_ptds)(CUdeviceptr_v2 dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoA_v7000_ptds)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoH_v7000_ptds)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoA_v7000_ptds)(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy2D_v7000_ptds)(const CUDA_MEMCPY2D_v2 *pCopy);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy2DUnaligned_v7000_ptds)(const CUDA_MEMCPY2D_v2 *pCopy);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy3D_v7000_ptds)(const CUDA_MEMCPY3D_v2 *pCopy);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeer_v7000_ptds)(const CUDA_MEMCPY3D_PEER_v1 *pCopy);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAsync_v7000_ptsz)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyPeerAsync_v7000_ptsz)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoDAsync_v7000_ptsz)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoHAsync_v7000_ptsz)(void *dstHost, CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoDAsync_v7000_ptsz)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoAAsync_v7000_ptsz)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoHAsync_v7000_ptsz)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy2DAsync_v7000_ptsz)(const CUDA_MEMCPY2D_v2 *pCopy, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy3DAsync_v7000_ptsz)(const CUDA_MEMCPY3D_v2 *pCopy, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeerAsync_v7000_ptsz)(const CUDA_MEMCPY3D_PEER_v1 *pCopy, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD8_v7000_ptds)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD16_v7000_ptds)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD32_v7000_ptds)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8_v7000_ptds)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16_v7000_ptds)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32_v7000_ptds)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD8Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD16Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD32Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32Async_v7000_ptsz)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuArrayCreate_v3020)(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR_v2 *pAllocateArray);
+ typedef CUresult (CUDAAPI *PFN_cuArrayGetDescriptor_v3020)(CUDA_ARRAY_DESCRIPTOR_v2 *pArrayDescriptor, CUarray hArray);
+ typedef CUresult (CUDAAPI *PFN_cuArrayGetSparseProperties_v11010)(CUDA_ARRAY_SPARSE_PROPERTIES_v1 *sparseProperties, CUarray array);
+ typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayGetSparseProperties_v11010)(CUDA_ARRAY_SPARSE_PROPERTIES_v1 *sparseProperties, CUmipmappedArray mipmap);
+
+ typedef CUresult (CUDAAPI *PFN_cuArrayGetMemoryRequirements_v11060)(CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 *memoryRequirements, CUarray array, CUdevice device);
+ typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayGetMemoryRequirements_v11060)(CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 *memoryRequirements, CUmipmappedArray mipmap, CUdevice device);
+
+ typedef CUresult (CUDAAPI *PFN_cuArrayGetPlane_v11020)(CUarray *pPlaneArray, CUarray hArray, unsigned int planeIdx);
+ typedef CUresult (CUDAAPI *PFN_cuArrayDestroy_v2000)(CUarray hArray);
+ typedef CUresult (CUDAAPI *PFN_cuArray3DCreate_v3020)(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v2 *pAllocateArray);
+ typedef CUresult (CUDAAPI *PFN_cuArray3DGetDescriptor_v3020)(CUDA_ARRAY3D_DESCRIPTOR_v2 *pArrayDescriptor, CUarray hArray);
+ typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayCreate_v5000)(CUmipmappedArray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v2 *pMipmappedArrayDesc, unsigned int numMipmapLevels);
+ typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayGetLevel_v5000)(CUarray *pLevelArray, CUmipmappedArray hMipmappedArray, unsigned int level);
+ typedef CUresult (CUDAAPI *PFN_cuMipmappedArrayDestroy_v5000)(CUmipmappedArray hMipmappedArray);
+ typedef CUresult (CUDAAPI *PFN_cuMemAddressReserve_v10020)(CUdeviceptr_v2 *ptr, size_t size, size_t alignment, CUdeviceptr_v2 addr, unsigned long long flags);
+ typedef CUresult (CUDAAPI *PFN_cuMemAddressFree_v10020)(CUdeviceptr_v2 ptr, size_t size);
+ typedef CUresult (CUDAAPI *PFN_cuMemCreate_v10020)(CUmemGenericAllocationHandle_v1 *handle, size_t size, const CUmemAllocationProp_v1 *prop, unsigned long long flags);
+ typedef CUresult (CUDAAPI *PFN_cuMemRelease_v10020)(CUmemGenericAllocationHandle_v1 handle);
+ typedef CUresult (CUDAAPI *PFN_cuMemMap_v10020)(CUdeviceptr_v2 ptr, size_t size, size_t offset, CUmemGenericAllocationHandle_v1 handle, unsigned long long flags);
+ typedef CUresult (CUDAAPI *PFN_cuMemMapArrayAsync_v11010_ptsz)(CUarrayMapInfo_v1 *mapInfoList, unsigned int count, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemUnmap_v10020)(CUdeviceptr_v2 ptr, size_t size);
+ typedef CUresult (CUDAAPI *PFN_cuMemSetAccess_v10020)(CUdeviceptr_v2 ptr, size_t size, const CUmemAccessDesc_v1 *desc, size_t count);
+ typedef CUresult (CUDAAPI *PFN_cuMemGetAccess_v10020)(unsigned long long *flags, const CUmemLocation_v1 *location, CUdeviceptr_v2 ptr);
+ typedef CUresult (CUDAAPI *PFN_cuMemExportToShareableHandle_v10020)(void *shareableHandle, CUmemGenericAllocationHandle_v1 handle, CUmemAllocationHandleType handleType, unsigned long long flags);
+ typedef CUresult (CUDAAPI *PFN_cuMemImportFromShareableHandle_v10020)(CUmemGenericAllocationHandle_v1 *handle, void *osHandle, CUmemAllocationHandleType shHandleType);
+ typedef CUresult (CUDAAPI *PFN_cuMemGetAllocationGranularity_v10020)(size_t *granularity, const CUmemAllocationProp_v1 *prop, CUmemAllocationGranularity_flags option);
+ typedef CUresult (CUDAAPI *PFN_cuMemGetAllocationPropertiesFromHandle_v10020)(CUmemAllocationProp_v1 *prop, CUmemGenericAllocationHandle_v1 handle);
+ typedef CUresult (CUDAAPI *PFN_cuMemRetainAllocationHandle_v11000)(CUmemGenericAllocationHandle_v1 *handle, void *addr);
+ typedef CUresult (CUDAAPI *PFN_cuMemFreeAsync_v11020_ptsz)(CUdeviceptr_v2 dptr, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemAllocAsync_v11020_ptsz)(CUdeviceptr_v2 *dptr, size_t bytesize, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemPoolTrimTo_v11020)(CUmemoryPool pool, size_t minBytesToKeep);
+ typedef CUresult (CUDAAPI *PFN_cuMemPoolSetAttribute_v11020)(CUmemoryPool pool, CUmemPool_attribute attr, void *value);
+ typedef CUresult (CUDAAPI *PFN_cuMemPoolGetAttribute_v11020)(CUmemoryPool pool, CUmemPool_attribute attr, void *value);
+ typedef CUresult (CUDAAPI *PFN_cuMemPoolSetAccess_v11020)(CUmemoryPool pool, const CUmemAccessDesc_v1 *map, size_t count);
+ typedef CUresult (CUDAAPI *PFN_cuMemPoolGetAccess_v11020)(CUmemAccess_flags *flags, CUmemoryPool memPool, CUmemLocation_v1 *location);
+ typedef CUresult (CUDAAPI *PFN_cuMemPoolCreate_v11020)(CUmemoryPool *pool, const CUmemPoolProps_v1 *poolProps);
+ typedef CUresult (CUDAAPI *PFN_cuMemPoolDestroy_v11020)(CUmemoryPool pool);
+ typedef CUresult (CUDAAPI *PFN_cuMemAllocFromPoolAsync_v11020_ptsz)(CUdeviceptr_v2 *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemPoolExportToShareableHandle_v11020)(void *handle_out, CUmemoryPool pool, CUmemAllocationHandleType handleType, unsigned long long flags);
+ typedef CUresult (CUDAAPI *PFN_cuMemPoolImportFromShareableHandle_v11020)(CUmemoryPool *pool_out, void *handle, CUmemAllocationHandleType handleType, unsigned long long flags);
+ typedef CUresult (CUDAAPI *PFN_cuMemPoolExportPointer_v11020)(CUmemPoolPtrExportData_v1 *shareData_out, CUdeviceptr_v2 ptr);
+ typedef CUresult (CUDAAPI *PFN_cuMemPoolImportPointer_v11020)(CUdeviceptr_v2 *ptr_out, CUmemoryPool pool, CUmemPoolPtrExportData_v1 *shareData);
+ typedef CUresult (CUDAAPI *PFN_cuPointerGetAttribute_v4000)(void *data, CUpointer_attribute attribute, CUdeviceptr_v2 ptr);
+ typedef CUresult (CUDAAPI *PFN_cuMemPrefetchAsync_v8000_ptsz)(CUdeviceptr_v2 devPtr, size_t count, CUdevice_v1 dstDevice, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuMemAdvise_v8000)(CUdeviceptr_v2 devPtr, size_t count, CUmem_advise advice, CUdevice_v1 device);
+ typedef CUresult (CUDAAPI *PFN_cuMemRangeGetAttribute_v8000)(void *data, size_t dataSize, CUmem_range_attribute attribute, CUdeviceptr_v2 devPtr, size_t count);
+ typedef CUresult (CUDAAPI *PFN_cuMemRangeGetAttributes_v8000)(void **data, size_t *dataSizes, CUmem_range_attribute *attributes, size_t numAttributes, CUdeviceptr_v2 devPtr, size_t count);
+ typedef CUresult (CUDAAPI *PFN_cuPointerSetAttribute_v6000)(const void *value, CUpointer_attribute attribute, CUdeviceptr_v2 ptr);
+ typedef CUresult (CUDAAPI *PFN_cuPointerGetAttributes_v7000)(unsigned int numAttributes, CUpointer_attribute *attributes, void **data, CUdeviceptr_v2 ptr);
+ typedef CUresult (CUDAAPI *PFN_cuStreamCreate_v2000)(CUstream *phStream, unsigned int Flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamCreateWithPriority_v5050)(CUstream *phStream, unsigned int flags, int priority);
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetPriority_v7000_ptsz)(CUstream hStream, int *priority);
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetFlags_v7000_ptsz)(CUstream hStream, unsigned int *flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetCtx_v9020_ptsz)(CUstream hStream, CUcontext *pctx);
+ typedef CUresult (CUDAAPI *PFN_cuStreamWaitEvent_v7000_ptsz)(CUstream hStream, CUevent hEvent, unsigned int Flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamAddCallback_v7000_ptsz)(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10010_ptsz)(CUstream hStream, CUstreamCaptureMode mode);
+ typedef CUresult (CUDAAPI *PFN_cuThreadExchangeStreamCaptureMode_v10010)(CUstreamCaptureMode *mode);
+ typedef CUresult (CUDAAPI *PFN_cuStreamEndCapture_v10000_ptsz)(CUstream hStream, CUgraph *phGraph);
+ typedef CUresult (CUDAAPI *PFN_cuStreamIsCapturing_v10000_ptsz)(CUstream hStream, CUstreamCaptureStatus *captureStatus);
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v10010_ptsz)(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out);
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v11030_ptsz)(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out);
+ typedef CUresult (CUDAAPI *PFN_cuStreamUpdateCaptureDependencies_v11030_ptsz)(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamAttachMemAsync_v7000_ptsz)(CUstream hStream, CUdeviceptr_v2 dptr, size_t length, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamQuery_v7000_ptsz)(CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuStreamSynchronize_v7000_ptsz)(CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuStreamDestroy_v4000)(CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuStreamCopyAttributes_v11000_ptsz)(CUstream dst, CUstream src);
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetAttribute_v11000_ptsz)(CUstream hStream, CUstreamAttrID attr, CUstreamAttrValue_v1 *value_out);
+ typedef CUresult (CUDAAPI *PFN_cuStreamSetAttribute_v11000_ptsz)(CUstream hStream, CUstreamAttrID attr, const CUstreamAttrValue_v1 *value);
+ typedef CUresult (CUDAAPI *PFN_cuEventCreate_v2000)(CUevent *phEvent, unsigned int Flags);
+ typedef CUresult (CUDAAPI *PFN_cuEventRecord_v7000_ptsz)(CUevent hEvent, CUstream hStream);
+ typedef CUresult (CUDAAPI *PFN_cuEventRecordWithFlags_v11010_ptsz)(CUevent hEvent, CUstream hStream, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuEventQuery_v2000)(CUevent hEvent);
+ typedef CUresult (CUDAAPI *PFN_cuEventSynchronize_v2000)(CUevent hEvent);
+ typedef CUresult (CUDAAPI *PFN_cuEventDestroy_v4000)(CUevent hEvent);
+ typedef CUresult (CUDAAPI *PFN_cuEventElapsedTime_v2000)(float *pMilliseconds, CUevent hStart, CUevent hEnd);
+ typedef CUresult (CUDAAPI *PFN_cuImportExternalMemory_v10000)(CUexternalMemory *extMem_out, const CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 *memHandleDesc);
+ typedef CUresult (CUDAAPI *PFN_cuExternalMemoryGetMappedBuffer_v10000)(CUdeviceptr_v2 *devPtr, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 *bufferDesc);
+ typedef CUresult (CUDAAPI *PFN_cuExternalMemoryGetMappedMipmappedArray_v10000)(CUmipmappedArray *mipmap, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 *mipmapDesc);
+ typedef CUresult (CUDAAPI *PFN_cuDestroyExternalMemory_v10000)(CUexternalMemory extMem);
+ typedef CUresult (CUDAAPI *PFN_cuImportExternalSemaphore_v10000)(CUexternalSemaphore *extSem_out, const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 *semHandleDesc);
+ typedef CUresult (CUDAAPI *PFN_cuSignalExternalSemaphoresAsync_v10000_ptsz)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream);
+ typedef CUresult (CUDAAPI *PFN_cuWaitExternalSemaphoresAsync_v10000_ptsz)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream);
+ typedef CUresult (CUDAAPI *PFN_cuDestroyExternalSemaphore_v10000)(CUexternalSemaphore extSem);
+ typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v8000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v9000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v8000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v9000_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v8000_ptsz)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams_v1 *paramArray, unsigned int flags);
+
+ typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v11070_ptsz)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags);
+ typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v11070_ptsz)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags);
+
+ typedef CUresult (CUDAAPI *PFN_cuFuncGetAttribute_v2020)(int *pi, CUfunction_attribute attrib, CUfunction hfunc);
+ typedef CUresult (CUDAAPI *PFN_cuFuncSetAttribute_v9000)(CUfunction hfunc, CUfunction_attribute attrib, int value);
+ typedef CUresult (CUDAAPI *PFN_cuFuncSetCacheConfig_v3000)(CUfunction hfunc, CUfunc_cache config);
+ typedef CUresult (CUDAAPI *PFN_cuFuncSetSharedMemConfig_v4020)(CUfunction hfunc, CUsharedconfig config);
+ typedef CUresult (CUDAAPI *PFN_cuLaunchKernel_v7000_ptsz)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra);
+
+
+
+ typedef CUresult (CUDAAPI *PFN_cuLaunchCooperativeKernel_v9000_ptsz)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams);
678
+ typedef CUresult (CUDAAPI *PFN_cuLaunchCooperativeKernelMultiDevice_v9000)(CUDA_LAUNCH_PARAMS_v1 *launchParamsList, unsigned int numDevices, unsigned int flags);
679
+ typedef CUresult (CUDAAPI *PFN_cuLaunchHostFunc_v10000_ptsz)(CUstream hStream, CUhostFn fn, void *userData);
680
+ typedef CUresult (CUDAAPI *PFN_cuFuncSetBlockShape_v2000)(CUfunction hfunc, int x, int y, int z);
681
+ typedef CUresult (CUDAAPI *PFN_cuFuncSetSharedSize_v2000)(CUfunction hfunc, unsigned int bytes);
682
+ typedef CUresult (CUDAAPI *PFN_cuParamSetSize_v2000)(CUfunction hfunc, unsigned int numbytes);
683
+ typedef CUresult (CUDAAPI *PFN_cuParamSeti_v2000)(CUfunction hfunc, int offset, unsigned int value);
684
+ typedef CUresult (CUDAAPI *PFN_cuParamSetf_v2000)(CUfunction hfunc, int offset, float value);
685
+ typedef CUresult (CUDAAPI *PFN_cuParamSetv_v2000)(CUfunction hfunc, int offset, void *ptr, unsigned int numbytes);
686
+ typedef CUresult (CUDAAPI *PFN_cuLaunch_v2000)(CUfunction f);
687
+ typedef CUresult (CUDAAPI *PFN_cuLaunchGrid_v2000)(CUfunction f, int grid_width, int grid_height);
688
+ typedef CUresult (CUDAAPI *PFN_cuLaunchGridAsync_v2000)(CUfunction f, int grid_width, int grid_height, CUstream hStream);
689
+ typedef CUresult (CUDAAPI *PFN_cuParamSetTexRef_v2000)(CUfunction hfunc, int texunit, CUtexref hTexRef);
690
+ typedef CUresult (CUDAAPI *PFN_cuGraphCreate_v10000)(CUgraph *phGraph, unsigned int flags);
691
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddKernelNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams);
692
+ typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeGetParams_v10000)(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams);
693
+ typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeSetParams_v10000)(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams);
694
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddMemcpyNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMCPY3D_v2 *copyParams, CUcontext ctx);
695
+ typedef CUresult (CUDAAPI *PFN_cuGraphMemcpyNodeGetParams_v10000)(CUgraphNode hNode, CUDA_MEMCPY3D_v2 *nodeParams);
696
+ typedef CUresult (CUDAAPI *PFN_cuGraphMemcpyNodeSetParams_v10000)(CUgraphNode hNode, const CUDA_MEMCPY3D_v2 *nodeParams);
697
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddMemsetNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMSET_NODE_PARAMS_v1 *memsetParams, CUcontext ctx);
698
+ typedef CUresult (CUDAAPI *PFN_cuGraphMemsetNodeGetParams_v10000)(CUgraphNode hNode, CUDA_MEMSET_NODE_PARAMS_v1 *nodeParams);
699
+ typedef CUresult (CUDAAPI *PFN_cuGraphMemsetNodeSetParams_v10000)(CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS_v1 *nodeParams);
700
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddHostNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_HOST_NODE_PARAMS_v1 *nodeParams);
701
+ typedef CUresult (CUDAAPI *PFN_cuGraphHostNodeGetParams_v10000)(CUgraphNode hNode, CUDA_HOST_NODE_PARAMS_v1 *nodeParams);
702
+ typedef CUresult (CUDAAPI *PFN_cuGraphHostNodeSetParams_v10000)(CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS_v1 *nodeParams);
703
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddChildGraphNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUgraph childGraph);
704
+ typedef CUresult (CUDAAPI *PFN_cuGraphChildGraphNodeGetGraph_v10000)(CUgraphNode hNode, CUgraph *phGraph);
705
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddEmptyNode_v10000)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies);
706
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddEventRecordNode_v11010)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event);
707
+ typedef CUresult (CUDAAPI *PFN_cuGraphEventRecordNodeGetEvent_v11010)(CUgraphNode hNode, CUevent *event_out);
708
+ typedef CUresult (CUDAAPI *PFN_cuGraphEventRecordNodeSetEvent_v11010)(CUgraphNode hNode, CUevent event);
709
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddEventWaitNode_v11010)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event);
710
+ typedef CUresult (CUDAAPI *PFN_cuGraphEventWaitNodeGetEvent_v11010)(CUgraphNode hNode, CUevent *event_out);
711
+ typedef CUresult (CUDAAPI *PFN_cuGraphEventWaitNodeSetEvent_v11010)(CUgraphNode hNode, CUevent event);
712
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddExternalSemaphoresSignalNode_v11020)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *nodeParams);
713
+ typedef CUresult (CUDAAPI *PFN_cuGraphExternalSemaphoresSignalNodeGetParams_v11020)(CUgraphNode hNode, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *params_out);
714
+ typedef CUresult (CUDAAPI *PFN_cuGraphExternalSemaphoresSignalNodeSetParams_v11020)(CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *nodeParams);
715
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddExternalSemaphoresWaitNode_v11020)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *nodeParams);
716
+ typedef CUresult (CUDAAPI *PFN_cuGraphExternalSemaphoresWaitNodeGetParams_v11020)(CUgraphNode hNode, CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *params_out);
717
+ typedef CUresult (CUDAAPI *PFN_cuGraphExternalSemaphoresWaitNodeSetParams_v11020)(CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *nodeParams);
718
+
719
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddBatchMemOpNode_v11070)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams);
720
+ typedef CUresult (CUDAAPI *PFN_cuGraphBatchMemOpNodeGetParams_v11070)(CUgraphNode hNode, CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams_out);
721
+ typedef CUresult (CUDAAPI *PFN_cuGraphBatchMemOpNodeSetParams_v11070)(CUgraphNode hNode, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams);
722
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecBatchMemOpNodeSetParams_v11070)(CUgraphExec graphExec, CUgraphNode node, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams);
723
+
724
+ typedef CUresult (CUDAAPI *PFN_cuGraphClone_v10000)(CUgraph *phGraphClone, CUgraph originalGraph);
725
+ typedef CUresult (CUDAAPI *PFN_cuGraphNodeFindInClone_v10000)(CUgraphNode *phNode, CUgraphNode hOriginalNode, CUgraph hClonedGraph);
726
+ typedef CUresult (CUDAAPI *PFN_cuGraphNodeGetType_v10000)(CUgraphNode hNode, CUgraphNodeType *type);
727
+ typedef CUresult (CUDAAPI *PFN_cuGraphGetNodes_v10000)(CUgraph hGraph, CUgraphNode *nodes, size_t *numNodes);
728
+ typedef CUresult (CUDAAPI *PFN_cuGraphGetRootNodes_v10000)(CUgraph hGraph, CUgraphNode *rootNodes, size_t *numRootNodes);
729
+ typedef CUresult (CUDAAPI *PFN_cuGraphGetEdges_v10000)(CUgraph hGraph, CUgraphNode *from, CUgraphNode *to, size_t *numEdges);
730
+ typedef CUresult (CUDAAPI *PFN_cuGraphNodeGetDependencies_v10000)(CUgraphNode hNode, CUgraphNode *dependencies, size_t *numDependencies);
731
+ typedef CUresult (CUDAAPI *PFN_cuGraphNodeGetDependentNodes_v10000)(CUgraphNode hNode, CUgraphNode *dependentNodes, size_t *numDependentNodes);
732
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddDependencies_v10000)(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies);
733
+ typedef CUresult (CUDAAPI *PFN_cuGraphRemoveDependencies_v10000)(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies);
734
+ typedef CUresult (CUDAAPI *PFN_cuGraphDestroyNode_v10000)(CUgraphNode hNode);
735
+ typedef CUresult (CUDAAPI *PFN_cuGraphInstantiate_v11000)(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize);
736
+ typedef CUresult (CUDAAPI *PFN_cuGraphInstantiateWithFlags_v11040)(CUgraphExec *phGraphExec, CUgraph hGraph, unsigned long long flags);
737
+
738
+
739
+
740
+
741
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecKernelNodeSetParams_v10010)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams);
742
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecMemcpyNodeSetParams_v10020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMCPY3D_v2 *copyParams, CUcontext ctx);
743
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecMemsetNodeSetParams_v10020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS_v1 *memsetParams, CUcontext ctx);
744
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecHostNodeSetParams_v10020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS_v1 *nodeParams);
745
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecChildGraphNodeSetParams_v11010)(CUgraphExec hGraphExec, CUgraphNode hNode, CUgraph childGraph);
746
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecEventRecordNodeSetEvent_v11010)(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event);
747
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecEventWaitNodeSetEvent_v11010)(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event);
748
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecExternalSemaphoresSignalNodeSetParams_v11020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 *nodeParams);
749
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecExternalSemaphoresWaitNodeSetParams_v11020)(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 *nodeParams);
750
+ typedef CUresult (CUDAAPI *PFN_cuGraphUpload_v11010_ptsz)(CUgraphExec hGraphExec, CUstream hStream);
751
+ typedef CUresult (CUDAAPI *PFN_cuGraphLaunch_v10000_ptsz)(CUgraphExec hGraphExec, CUstream hStream);
752
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecDestroy_v10000)(CUgraphExec hGraphExec);
753
+ typedef CUresult (CUDAAPI *PFN_cuGraphDestroy_v10000)(CUgraph hGraph);
754
+ typedef CUresult (CUDAAPI *PFN_cuGraphExecUpdate_v10020)(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphNode *hErrorNode_out, CUgraphExecUpdateResult *updateResult_out);
755
+ typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeCopyAttributes_v11000)(CUgraphNode dst, CUgraphNode src);
756
+ typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeGetAttribute_v11000)(CUgraphNode hNode, CUkernelNodeAttrID attr, CUkernelNodeAttrValue_v1 *value_out);
757
+ typedef CUresult (CUDAAPI *PFN_cuGraphKernelNodeSetAttribute_v11000)(CUgraphNode hNode, CUkernelNodeAttrID attr, const CUkernelNodeAttrValue_v1 *value);
758
+ typedef CUresult (CUDAAPI *PFN_cuGraphDebugDotPrint_v11030)(CUgraph hGraph, const char *path, unsigned int flags);
759
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddMemAllocNode_v11040)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUDA_MEM_ALLOC_NODE_PARAMS *nodeParams);
760
+ typedef CUresult (CUDAAPI *PFN_cuGraphMemAllocNodeGetParams_v11040)(CUgraphNode hNode, CUDA_MEM_ALLOC_NODE_PARAMS *params_out);
761
+ typedef CUresult (CUDAAPI *PFN_cuGraphAddMemFreeNode_v11040)(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUdeviceptr dptr);
762
+ typedef CUresult (CUDAAPI *PFN_cuGraphMemFreeNodeGetParams_v11040)(CUgraphNode hNode, CUdeviceptr *dptr_out);
763
+ typedef CUresult (CUDAAPI *PFN_cuGraphNodeSetEnabled_v11060)(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int isEnabled);
764
+ typedef CUresult (CUDAAPI *PFN_cuGraphNodeGetEnabled_v11060)(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int *isEnabled);
765
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGraphMemTrim_v11040)(CUdevice device);
766
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetGraphMemAttribute_v11040)(CUdevice device, CUgraphMem_attribute attr, void* value);
767
+ typedef CUresult (CUDAAPI *PFN_cuDeviceSetGraphMemAttribute_v11040)(CUdevice device, CUgraphMem_attribute attr, void* value);
768
+ typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxActiveBlocksPerMultiprocessor_v6050)(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize);
769
+ typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000)(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize, unsigned int flags);
770
+ typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxPotentialBlockSize_v6050)(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit);
771
+ typedef CUresult (CUDAAPI *PFN_cuOccupancyMaxPotentialBlockSizeWithFlags_v7000)(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit, unsigned int flags);
772
+ typedef CUresult (CUDAAPI *PFN_cuOccupancyAvailableDynamicSMemPerBlock_v10020)(size_t *dynamicSmemSize, CUfunction func, int numBlocks, int blockSize);
773
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetArray_v2000)(CUtexref hTexRef, CUarray hArray, unsigned int Flags);
774
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmappedArray_v5000)(CUtexref hTexRef, CUmipmappedArray hMipmappedArray, unsigned int Flags);
775
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress_v3020)(size_t *ByteOffset, CUtexref hTexRef, CUdeviceptr_v2 dptr, size_t bytes);
776
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress2D_v4010)(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v2 *desc, CUdeviceptr_v2 dptr, size_t Pitch);
777
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetFormat_v2000)(CUtexref hTexRef, CUarray_format fmt, int NumPackedComponents);
778
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddressMode_v2000)(CUtexref hTexRef, int dim, CUaddress_mode am);
779
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetFilterMode_v2000)(CUtexref hTexRef, CUfilter_mode fm);
780
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmapFilterMode_v5000)(CUtexref hTexRef, CUfilter_mode fm);
781
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmapLevelBias_v5000)(CUtexref hTexRef, float bias);
782
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetMipmapLevelClamp_v5000)(CUtexref hTexRef, float minMipmapLevelClamp, float maxMipmapLevelClamp);
783
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetMaxAnisotropy_v5000)(CUtexref hTexRef, unsigned int maxAniso);
784
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetBorderColor_v8000)(CUtexref hTexRef, float *pBorderColor);
785
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetFlags_v2000)(CUtexref hTexRef, unsigned int Flags);
786
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetAddress_v3020)(CUdeviceptr_v2 *pdptr, CUtexref hTexRef);
787
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetArray_v2000)(CUarray *phArray, CUtexref hTexRef);
788
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetMipmappedArray_v5000)(CUmipmappedArray *phMipmappedArray, CUtexref hTexRef);
789
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetAddressMode_v2000)(CUaddress_mode *pam, CUtexref hTexRef, int dim);
790
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetFilterMode_v2000)(CUfilter_mode *pfm, CUtexref hTexRef);
791
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetFormat_v2000)(CUarray_format *pFormat, int *pNumChannels, CUtexref hTexRef);
792
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetMipmapFilterMode_v5000)(CUfilter_mode *pfm, CUtexref hTexRef);
793
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetMipmapLevelBias_v5000)(float *pbias, CUtexref hTexRef);
794
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetMipmapLevelClamp_v5000)(float *pminMipmapLevelClamp, float *pmaxMipmapLevelClamp, CUtexref hTexRef);
795
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetMaxAnisotropy_v5000)(int *pmaxAniso, CUtexref hTexRef);
796
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetBorderColor_v8000)(float *pBorderColor, CUtexref hTexRef);
797
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetFlags_v2000)(unsigned int *pFlags, CUtexref hTexRef);
798
+ typedef CUresult (CUDAAPI *PFN_cuTexRefCreate_v2000)(CUtexref *pTexRef);
799
+ typedef CUresult (CUDAAPI *PFN_cuTexRefDestroy_v2000)(CUtexref hTexRef);
800
+ typedef CUresult (CUDAAPI *PFN_cuSurfRefSetArray_v3000)(CUsurfref hSurfRef, CUarray hArray, unsigned int Flags);
801
+ typedef CUresult (CUDAAPI *PFN_cuSurfRefGetArray_v3000)(CUarray *phArray, CUsurfref hSurfRef);
802
+ typedef CUresult (CUDAAPI *PFN_cuTexObjectCreate_v5000)(CUtexObject_v1 *pTexObject, const CUDA_RESOURCE_DESC_v1 *pResDesc, const CUDA_TEXTURE_DESC_v1 *pTexDesc, const CUDA_RESOURCE_VIEW_DESC_v1 *pResViewDesc);
803
+ typedef CUresult (CUDAAPI *PFN_cuTexObjectDestroy_v5000)(CUtexObject_v1 texObject);
804
+ typedef CUresult (CUDAAPI *PFN_cuTexObjectGetResourceDesc_v5000)(CUDA_RESOURCE_DESC_v1 *pResDesc, CUtexObject_v1 texObject);
805
+ typedef CUresult (CUDAAPI *PFN_cuTexObjectGetTextureDesc_v5000)(CUDA_TEXTURE_DESC_v1 *pTexDesc, CUtexObject_v1 texObject);
806
+ typedef CUresult (CUDAAPI *PFN_cuTexObjectGetResourceViewDesc_v5000)(CUDA_RESOURCE_VIEW_DESC_v1 *pResViewDesc, CUtexObject_v1 texObject);
807
+ typedef CUresult (CUDAAPI *PFN_cuSurfObjectCreate_v5000)(CUsurfObject_v1 *pSurfObject, const CUDA_RESOURCE_DESC_v1 *pResDesc);
808
+ typedef CUresult (CUDAAPI *PFN_cuSurfObjectDestroy_v5000)(CUsurfObject_v1 surfObject);
809
+ typedef CUresult (CUDAAPI *PFN_cuSurfObjectGetResourceDesc_v5000)(CUDA_RESOURCE_DESC_v1 *pResDesc, CUsurfObject_v1 surfObject);
810
+ typedef CUresult (CUDAAPI *PFN_cuDeviceCanAccessPeer_v4000)(int *canAccessPeer, CUdevice_v1 dev, CUdevice_v1 peerDev);
811
+ typedef CUresult (CUDAAPI *PFN_cuCtxEnablePeerAccess_v4000)(CUcontext peerContext, unsigned int Flags);
812
+ typedef CUresult (CUDAAPI *PFN_cuCtxDisablePeerAccess_v4000)(CUcontext peerContext);
813
+ typedef CUresult (CUDAAPI *PFN_cuDeviceGetP2PAttribute_v8000)(int *value, CUdevice_P2PAttribute attrib, CUdevice_v1 srcDevice, CUdevice_v1 dstDevice);
814
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsUnregisterResource_v3000)(CUgraphicsResource resource);
815
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsSubResourceGetMappedArray_v3000)(CUarray *pArray, CUgraphicsResource resource, unsigned int arrayIndex, unsigned int mipLevel);
816
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedMipmappedArray_v5000)(CUmipmappedArray *pMipmappedArray, CUgraphicsResource resource);
817
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedPointer_v3020)(CUdeviceptr_v2 *pDevPtr, size_t *pSize, CUgraphicsResource resource);
818
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceSetMapFlags_v6050)(CUgraphicsResource resource, unsigned int flags);
819
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsMapResources_v7000_ptsz)(unsigned int count, CUgraphicsResource *resources, CUstream hStream);
820
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsUnmapResources_v7000_ptsz)(unsigned int count, CUgraphicsResource *resources, CUstream hStream);
821
+ typedef CUresult (CUDAAPI *PFN_cuGetExportTable_v3000)(const void **ppExportTable, const CUuuid *pExportTableId);
822
+ typedef CUresult (CUDAAPI *PFN_cuFuncGetModule_v11000)(CUmodule *hmod, CUfunction hfunc);
823
+ typedef CUresult (CUDAAPI *PFN_cuGetProcAddress_v11030)(const char *symbol, void **pfn, int driverVersion, cuuint64_t flags);
824
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoD_v3020)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount);
825
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoH_v3020)(void *dstHost, CUdeviceptr_v2 srcDevice, size_t ByteCount);
826
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoD_v3020)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount);
827
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoA_v3020)(CUarray dstArray, size_t dstOffset, CUdeviceptr_v2 srcDevice, size_t ByteCount);
828
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoD_v3020)(CUdeviceptr_v2 dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount);
829
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoA_v3020)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount);
830
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoH_v3020)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount);
831
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoA_v3020)(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount);
832
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoAAsync_v3020)(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream);
833
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoHAsync_v3020)(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream);
834
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy2D_v3020)(const CUDA_MEMCPY2D_v2 *pCopy);
835
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy2DUnaligned_v3020)(const CUDA_MEMCPY2D_v2 *pCopy);
836
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy3D_v3020)(const CUDA_MEMCPY3D_v2 *pCopy);
837
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoDAsync_v3020)(CUdeviceptr_v2 dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream);
838
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoHAsync_v3020)(void *dstHost, CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream);
839
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoDAsync_v3020)(CUdeviceptr_v2 dstDevice, CUdeviceptr_v2 srcDevice, size_t ByteCount, CUstream hStream);
840
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy2DAsync_v3020)(const CUDA_MEMCPY2D_v2 *pCopy, CUstream hStream);
841
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy3DAsync_v3020)(const CUDA_MEMCPY3D_v2 *pCopy, CUstream hStream);
842
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD8_v3020)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N);
843
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD16_v3020)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N);
844
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD32_v3020)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N);
845
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height);
846
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height);
847
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height);
848
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy_v4000)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount);
849
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAsync_v4000)(CUdeviceptr_v2 dst, CUdeviceptr_v2 src, size_t ByteCount, CUstream hStream);
850
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyPeer_v4000)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount);
851
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyPeerAsync_v4000)(CUdeviceptr_v2 dstDevice, CUcontext dstContext, CUdeviceptr_v2 srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream);
852
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeer_v4000)(const CUDA_MEMCPY3D_PEER_v1 *pCopy);
853
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy3DPeerAsync_v4000)(const CUDA_MEMCPY3D_PEER_v1 *pCopy, CUstream hStream);
854
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD8Async_v3020)(CUdeviceptr_v2 dstDevice, unsigned char uc, size_t N, CUstream hStream);
855
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD16Async_v3020)(CUdeviceptr_v2 dstDevice, unsigned short us, size_t N, CUstream hStream);
856
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD32Async_v3020)(CUdeviceptr_v2 dstDevice, unsigned int ui, size_t N, CUstream hStream);
857
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8Async_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream);
858
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16Async_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream);
859
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32Async_v3020)(CUdeviceptr_v2 dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream);
860
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetPriority_v5050)(CUstream hStream, int *priority);
861
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetFlags_v5050)(CUstream hStream, unsigned int *flags);
862
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetCtx_v9020)(CUstream hStream, CUcontext *pctx);
863
+ typedef CUresult (CUDAAPI *PFN_cuStreamWaitEvent_v3020)(CUstream hStream, CUevent hEvent, unsigned int Flags);
864
+ typedef CUresult (CUDAAPI *PFN_cuStreamAddCallback_v5000)(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags);
865
+ typedef CUresult (CUDAAPI *PFN_cuStreamAttachMemAsync_v6000)(CUstream hStream, CUdeviceptr_v2 dptr, size_t length, unsigned int flags);
866
+ typedef CUresult (CUDAAPI *PFN_cuStreamQuery_v2000)(CUstream hStream);
867
+ typedef CUresult (CUDAAPI *PFN_cuStreamSynchronize_v2000)(CUstream hStream);
868
+ typedef CUresult (CUDAAPI *PFN_cuEventRecord_v2000)(CUevent hEvent, CUstream hStream);
869
+ typedef CUresult (CUDAAPI *PFN_cuEventRecordWithFlags_v11010)(CUevent hEvent, CUstream hStream, unsigned int flags);
870
+ typedef CUresult (CUDAAPI *PFN_cuLaunchKernel_v4000)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra);
871
+
872
+
873
+
874
+ typedef CUresult (CUDAAPI *PFN_cuLaunchHostFunc_v10000)(CUstream hStream, CUhostFn fn, void *userData);
875
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsMapResources_v3000)(unsigned int count, CUgraphicsResource *resources, CUstream hStream);
876
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsUnmapResources_v3000)(unsigned int count, CUgraphicsResource *resources, CUstream hStream);
877
+ typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v8000)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags);
878
+ typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v8000)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags);
879
+ typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v9000)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags);
880
+ typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v9000)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags);
881
+ typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v8000)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags);
882
+
883
+ typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue32_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags);
884
+ typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue32_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint32_t value, unsigned int flags);
885
+ typedef CUresult (CUDAAPI *PFN_cuStreamWriteValue64_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags);
886
+ typedef CUresult (CUDAAPI *PFN_cuStreamWaitValue64_v11070)(CUstream stream, CUdeviceptr_v2 addr, cuuint64_t value, unsigned int flags);
887
+ typedef CUresult (CUDAAPI *PFN_cuStreamBatchMemOp_v11070)(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags);
888
+
889
+ typedef CUresult (CUDAAPI *PFN_cuMemPrefetchAsync_v8000)(CUdeviceptr_v2 devPtr, size_t count, CUdevice_v1 dstDevice, CUstream hStream);
890
+ typedef CUresult (CUDAAPI *PFN_cuLaunchCooperativeKernel_v9000)(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams);
891
+ typedef CUresult (CUDAAPI *PFN_cuSignalExternalSemaphoresAsync_v10000)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream);
892
+ typedef CUresult (CUDAAPI *PFN_cuWaitExternalSemaphoresAsync_v10000)(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 *paramsArray, unsigned int numExtSems, CUstream stream);
893
+ typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10010)(CUstream hStream, CUstreamCaptureMode mode);
894
+ typedef CUresult (CUDAAPI *PFN_cuStreamEndCapture_v10000)(CUstream hStream, CUgraph *phGraph);
895
+ typedef CUresult (CUDAAPI *PFN_cuStreamIsCapturing_v10000)(CUstream hStream, CUstreamCaptureStatus *captureStatus);
896
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v10010)(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out);
897
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetCaptureInfo_v11030)(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out);
898
+ typedef CUresult (CUDAAPI *PFN_cuStreamUpdateCaptureDependencies_v11030)(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags);
899
+
900
+
901
+
902
+ typedef CUresult (CUDAAPI *PFN_cuGraphUpload_v11010)(CUgraphExec hGraph, CUstream hStream);
903
+ typedef CUresult (CUDAAPI *PFN_cuGraphLaunch_v10000)(CUgraphExec hGraph, CUstream hStream);
904
+ typedef CUresult (CUDAAPI *PFN_cuStreamCopyAttributes_v11000)(CUstream dstStream, CUstream srcStream);
905
+ typedef CUresult (CUDAAPI *PFN_cuStreamGetAttribute_v11000)(CUstream hStream, CUstreamAttrID attr, CUstreamAttrValue_v1 *value);
906
+ typedef CUresult (CUDAAPI *PFN_cuStreamSetAttribute_v11000)(CUstream hStream, CUstreamAttrID attr, const CUstreamAttrValue_v1 *param);
907
+ typedef CUresult (CUDAAPI *PFN_cuMemMapArrayAsync_v11010)(CUarrayMapInfo_v1 *mapInfoList, unsigned int count, CUstream hStream);
908
+ typedef CUresult (CUDAAPI *PFN_cuMemFreeAsync_v11020)(CUdeviceptr_v2 dptr, CUstream hStream);
909
+ typedef CUresult (CUDAAPI *PFN_cuMemAllocAsync_v11020)(CUdeviceptr_v2 *dptr, size_t bytesize, CUstream hStream);
910
+ typedef CUresult (CUDAAPI *PFN_cuMemAllocFromPoolAsync_v11020)(CUdeviceptr_v2 *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream);
911
+ typedef CUresult (CUDAAPI *PFN_cuFlushGPUDirectRDMAWrites_v11030)(CUflushGPUDirectRDMAWritesTarget target, CUflushGPUDirectRDMAWritesScope scope);
912
+ typedef CUresult (CUDAAPI *PFN_cuUserObjectCreate_v11030)(CUuserObject *object_out, void *ptr, CUhostFn destroy, unsigned int initialRefcount, unsigned int flags);
913
+ typedef CUresult (CUDAAPI *PFN_cuUserObjectRetain_v11030)(CUuserObject object, unsigned int count);
914
+ typedef CUresult (CUDAAPI *PFN_cuUserObjectRelease_v11030)(CUuserObject object, unsigned int count);
915
+ typedef CUresult (CUDAAPI *PFN_cuGraphRetainUserObject_v11030)(CUgraph graph, CUuserObject object, unsigned int count, unsigned int flags);
916
+ typedef CUresult (CUDAAPI *PFN_cuGraphReleaseUserObject_v11030)(CUgraph graph, CUuserObject object, unsigned int count);
917
+
918
+ typedef CUresult (CUDAAPI *PFN_cuModuleGetLoadingMode_v11070)(CUmoduleLoadingMode *mode);
919
+
920
+
921
+ typedef CUresult (CUDAAPI *PFN_cuMemGetHandleForAddressRange_v11070)(void *handle, CUdeviceptr dptr, size_t size, CUmemRangeHandleType handleType, unsigned long long flags);
922
+
923
+
924
+ /*
925
+ * Type definitions for older versioned functions in cuda.h
926
+ */
927
+ #if defined(__CUDA_API_VERSION_INTERNAL)
928
+ typedef CUresult (CUDAAPI *PFN_cuMemHostRegister_v4000)(void *p, size_t bytesize, unsigned int Flags);
929
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceSetMapFlags_v3000)(CUgraphicsResource resource, unsigned int flags);
930
+ typedef CUresult (CUDAAPI *PFN_cuLinkCreate_v5050)(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut);
931
+ typedef CUresult (CUDAAPI *PFN_cuLinkAddData_v5050)(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, unsigned int numOptions, CUjit_option *options, void **optionValues);
932
+ typedef CUresult (CUDAAPI *PFN_cuLinkAddFile_v5050)(CUlinkState state, CUjitInputType type, const char *path, unsigned int numOptions, CUjit_option *options, void **optionValues);
933
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress2D_v3020)(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v2 *desc, CUdeviceptr_v2 dptr, size_t Pitch);
934
+ typedef CUresult (CUDAAPI *PFN_cuDeviceTotalMem_v2000)(unsigned int *bytes, CUdevice_v1 dev);
935
+ typedef CUresult (CUDAAPI *PFN_cuCtxCreate_v2000)(CUcontext *pctx, unsigned int flags, CUdevice_v1 dev);
936
+ typedef CUresult (CUDAAPI *PFN_cuModuleGetGlobal_v2000)(CUdeviceptr_v1 *dptr, unsigned int *bytes, CUmodule hmod, const char *name);
937
+ typedef CUresult (CUDAAPI *PFN_cuMemGetInfo_v2000)(unsigned int *free, unsigned int *total);
938
+ typedef CUresult (CUDAAPI *PFN_cuMemAlloc_v2000)(CUdeviceptr_v1 *dptr, unsigned int bytesize);
939
+ typedef CUresult (CUDAAPI *PFN_cuMemAllocPitch_v2000)(CUdeviceptr_v1 *dptr, unsigned int *pPitch, unsigned int WidthInBytes, unsigned int Height, unsigned int ElementSizeBytes);
940
+ typedef CUresult (CUDAAPI *PFN_cuMemFree_v2000)(CUdeviceptr_v1 dptr);
941
+ typedef CUresult (CUDAAPI *PFN_cuMemGetAddressRange_v2000)(CUdeviceptr_v1 *pbase, unsigned int *psize, CUdeviceptr_v1 dptr);
942
+ typedef CUresult (CUDAAPI *PFN_cuMemAllocHost_v2000)(void **pp, unsigned int bytesize);
943
+ typedef CUresult (CUDAAPI *PFN_cuMemHostGetDevicePointer_v2020)(CUdeviceptr_v1 *pdptr, void *p, unsigned int Flags);
944
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoD_v2000)(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount);
945
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoH_v2000)(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount);
946
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoD_v2000)(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount);
947
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoA_v2000)(CUarray dstArray, unsigned int dstOffset, CUdeviceptr_v1 srcDevice, unsigned int ByteCount);
948
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoD_v2000)(CUdeviceptr_v1 dstDevice, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount);
949
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoA_v2000)(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount);
950
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoH_v2000)(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount);
951
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoA_v2000)(CUarray dstArray, unsigned int dstOffset, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount);
952
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoAAsync_v2000)(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount, CUstream hStream);
953
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyAtoHAsync_v2000)(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount, CUstream hStream);
954
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy2D_v2000)(const CUDA_MEMCPY2D_v1 *pCopy);
955
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy2DUnaligned_v2000)(const CUDA_MEMCPY2D_v1 *pCopy);
956
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy3D_v2000)(const CUDA_MEMCPY3D_v1 *pCopy);
957
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyHtoDAsync_v2000)(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount, CUstream hStream);
958
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoHAsync_v2000)(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream);
959
+ typedef CUresult (CUDAAPI *PFN_cuMemcpyDtoDAsync_v3000)(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream);
960
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy2DAsync_v2000)(const CUDA_MEMCPY2D_v1 *pCopy, CUstream hStream);
961
+ typedef CUresult (CUDAAPI *PFN_cuMemcpy3DAsync_v2000)(const CUDA_MEMCPY3D_v1 *pCopy, CUstream hStream);
962
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD8_v2000)(CUdeviceptr_v1 dstDevice, unsigned char uc, unsigned int N);
963
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD16_v2000)(CUdeviceptr_v1 dstDevice, unsigned short us, unsigned int N);
964
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD32_v2000)(CUdeviceptr_v1 dstDevice, unsigned int ui, unsigned int N);
965
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D8_v2000)(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned char uc, unsigned int Width, unsigned int Height);
966
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D16_v2000)(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned short us, unsigned int Width, unsigned int Height);
967
+ typedef CUresult (CUDAAPI *PFN_cuMemsetD2D32_v2000)(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned int ui, unsigned int Width, unsigned int Height);
968
+ typedef CUresult (CUDAAPI *PFN_cuArrayCreate_v2000)(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR_v1 *pAllocateArray);
969
+ typedef CUresult (CUDAAPI *PFN_cuArrayGetDescriptor_v2000)(CUDA_ARRAY_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray);
970
+ typedef CUresult (CUDAAPI *PFN_cuArray3DCreate_v2000)(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v1 *pAllocateArray);
971
+ typedef CUresult (CUDAAPI *PFN_cuArray3DGetDescriptor_v2000)(CUDA_ARRAY3D_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray);
972
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress_v2000)(unsigned int *ByteOffset, CUtexref hTexRef, CUdeviceptr_v1 dptr, unsigned int bytes);
973
+ typedef CUresult (CUDAAPI *PFN_cuTexRefSetAddress2D_v2020)(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v1 *desc, CUdeviceptr_v1 dptr, unsigned int Pitch);
974
+ typedef CUresult (CUDAAPI *PFN_cuTexRefGetAddress_v2000)(CUdeviceptr_v1 *pdptr, CUtexref hTexRef);
975
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsResourceGetMappedPointer_v3000)(CUdeviceptr_v1 *pDevPtr, unsigned int *pSize, CUgraphicsResource resource);
976
+ typedef CUresult (CUDAAPI *PFN_cuCtxDestroy_v2000)(CUcontext ctx);
977
+ typedef CUresult (CUDAAPI *PFN_cuCtxPopCurrent_v2000)(CUcontext *pctx);
978
+ typedef CUresult (CUDAAPI *PFN_cuCtxPushCurrent_v2000)(CUcontext ctx);
979
+ typedef CUresult (CUDAAPI *PFN_cuStreamDestroy_v2000)(CUstream hStream);
980
+ typedef CUresult (CUDAAPI *PFN_cuEventDestroy_v2000)(CUevent hEvent);
981
+ typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxRelease_v7000)(CUdevice_v1 dev);
982
+ typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxReset_v7000)(CUdevice_v1 dev);
983
+ typedef CUresult (CUDAAPI *PFN_cuDevicePrimaryCtxSetFlags_v7000)(CUdevice_v1 dev, unsigned int flags);
984
+ typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10000)(CUstream hStream);
985
+ typedef CUresult (CUDAAPI *PFN_cuStreamBeginCapture_v10000_ptsz)(CUstream hStream);
986
+ typedef CUresult (CUDAAPI *PFN_cuIpcOpenMemHandle_v4010)(CUdeviceptr_v2 *pdptr, CUipcMemHandle_v1 handle, unsigned int Flags);
987
+ typedef CUresult (CUDAAPI *PFN_cuGraphInstantiate_v10000)(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize);
988
+ #endif
989
+
990
+ #ifdef __cplusplus
991
+ }
992
+ #endif // __cplusplus
993
+
994
+ #endif // file guard
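
The PFN_* typedefs above are designed to pair with ::cuGetProcAddress (itself typedef'd above as PFN_cuGetProcAddress_v11030): the application requests a driver entry point at an explicit API version and stores the result in a pointer of the matching PFN type, which keeps the call signature in sync with the version requested. Below is a minimal sketch of that pattern, assuming a CUDA 11.3+ driver and the toolkit headers on the include path; the surrounding program is illustrative only and not part of the header.

#include <cuda.h>
#include <cudaTypedefs.h>
#include <stdio.h>

int main(void)
{
    /* Pointer typed with the versioned typedef declared in cudaTypedefs.h. */
    PFN_cuMemAllocAsync_v11020 pfnMemAllocAsync = NULL;

    if (cuInit(0) != CUDA_SUCCESS)
        return 1;

    /* Ask the driver for the entry point with CUDA 11.2 (11020) semantics. */
    if (cuGetProcAddress("cuMemAllocAsync", (void **)&pfnMemAllocAsync,
                         11020, CU_GET_PROC_ADDRESS_DEFAULT) != CUDA_SUCCESS
            || pfnMemAllocAsync == NULL) {
        fprintf(stderr, "cuMemAllocAsync is not exposed by this driver\n");
        return 1;
    }

    /* Once a context is current, pfnMemAllocAsync can be called exactly
     * like the regular cuMemAllocAsync. */
    return 0;
}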
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAU.h ADDED
@@ -0,0 +1,282 @@
+ /*
+ * Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #ifndef CUDAVDPAU_H
+ #define CUDAVDPAU_H
+
+ #ifdef CUDA_FORCE_API_VERSION
+ #error "CUDA_FORCE_API_VERSION is no longer supported."
+ #endif
+
+ #define cuVDPAUCtxCreate cuVDPAUCtxCreate_v2
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ /**
+ * \defgroup CUDA_VDPAU VDPAU Interoperability
+ * \ingroup CUDA_DRIVER
+ *
+ * ___MANBRIEF___ VDPAU interoperability functions of the low-level CUDA driver
+ * API (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes the VDPAU interoperability functions of the
+ * low-level CUDA driver application programming interface.
+ *
+ * @{
+ */
+
+ /**
+ * \brief Gets the CUDA device associated with a VDPAU device
+ *
+ * Returns in \p *pDevice the CUDA device associated with a \p vdpDevice, if
+ * applicable.
+ *
+ * \param pDevice - Device associated with vdpDevice
+ * \param vdpDevice - A VdpDevice handle
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE
+ * \notefnerr
+ *
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface,
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
+ * ::cudaVDPAUGetDevice
+ */
+ CUresult CUDAAPI cuVDPAUGetDevice(CUdevice *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
+
+ /**
+ * \brief Create a CUDA context for interoperability with VDPAU
+ *
+ * Creates a new CUDA context, initializes VDPAU interoperability, and
+ * associates the CUDA context with the calling thread. It must be called
+ * before performing any other VDPAU interoperability operations. It may fail
+ * if the needed VDPAU driver facilities are not available. For usage of the
+ * \p flags parameter, see ::cuCtxCreate().
+ *
+ * \param pCtx - Returned CUDA context
+ * \param flags - Options for CUDA context creation
+ * \param device - Device on which to create the context
+ * \param vdpDevice - The VdpDevice to interop with
+ * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_OUT_OF_MEMORY
+ * \notefnerr
+ *
+ * \sa ::cuCtxCreate, ::cuGraphicsVDPAURegisterVideoSurface,
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
+ * ::cuVDPAUGetDevice
+ */
+ CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
+
+ /**
+ * \brief Registers a VDPAU VdpVideoSurface object
+ *
+ * Registers the VdpVideoSurface specified by \p vdpSurface for access by
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
+ * The surface's intended usage is specified using \p flags, as follows:
+ *
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
+ * resource will be used. It is therefore assumed that this resource will be
+ * read from and written to by CUDA. This is the default value.
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
+ * will not write to this resource.
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
+ * CUDA will not read from this resource and will write over the
+ * entire contents of the resource, so none of the data previously
+ * stored in the resource will be preserved.
+ *
+ * The VdpVideoSurface is presented as an array of subresources that may be
+ * accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray.
+ * The exact number of valid \p arrayIndex values depends on the VDPAU surface
+ * format. The mapping is shown in the table below. \p mipLevel must be 0.
+ *
+ * \htmlonly
+ * <table>
+ * <tr><th>VdpChromaType </th><th>arrayIndex</th><th>Size </th><th>Format</th><th>Content </th></tr>
+ * <tr><td rowspan="4" valign="top">VDP_CHROMA_TYPE_420</td><td>0 </td><td>w x h/2</td><td>R8 </td><td>Top-field luma </td></tr>
+ * <tr> <td>1 </td><td>w x h/2</td><td>R8 </td><td>Bottom-field luma </td></tr>
+ * <tr> <td>2 </td><td>w/2 x h/4</td><td>R8G8 </td><td>Top-field chroma </td></tr>
+ * <tr> <td>3 </td><td>w/2 x h/4</td><td>R8G8 </td><td>Bottom-field chroma</td></tr>
+ * <tr><td rowspan="4" valign="top">VDP_CHROMA_TYPE_422</td><td>0 </td><td>w x h/2</td><td>R8 </td><td>Top-field luma </td></tr>
+ * <tr> <td>1 </td><td>w x h/2</td><td>R8 </td><td>Bottom-field luma </td></tr>
+ * <tr> <td>2 </td><td>w/2 x h/2</td><td>R8G8 </td><td>Top-field chroma </td></tr>
+ * <tr> <td>3 </td><td>w/2 x h/2</td><td>R8G8 </td><td>Bottom-field chroma</td></tr>
+ * </table>
+ * \endhtmlonly
+ *
+ * \latexonly
+ * \begin{tabular}{|l|l|l|l|l|}
+ * \hline
+ * VdpChromaType & arrayIndex & Size & Format & Content \\
+ * \hline
+ * VDP\_CHROMA\_TYPE\_420 & 0 & w x h/2 & R8 & Top-field luma \\
+ * & 1 & w x h/2 & R8 & Bottom-field luma \\
+ * & 2 & w/2 x h/4 & R8G8 & Top-field chroma \\
+ * & 3 & w/2 x h/4 & R8G8 & Bottom-field chroma \\
+ * \hline
+ * VDP\_CHROMA\_TYPE\_422 & 0 & w x h/2 & R8 & Top-field luma \\
+ * & 1 & w x h/2 & R8 & Bottom-field luma \\
+ * & 2 & w/2 x h/2 & R8G8 & Top-field chroma \\
+ * & 3 & w/2 x h/2 & R8G8 & Bottom-field chroma \\
+ * \hline
+ * \end{tabular}
+ * \endlatexonly
+ *
+ * \param pCudaResource - Pointer to the returned object handle
+ * \param vdpSurface - The VdpVideoSurface to be registered
+ * \param flags - Map flags
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_HANDLE,
+ * ::CUDA_ERROR_ALREADY_MAPPED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * \notefnerr
+ *
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate,
+ * ::cuGraphicsVDPAURegisterOutputSurface, ::cuGraphicsUnregisterResource,
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
+ * ::cuVDPAUGetDevice,
+ * ::cudaGraphicsVDPAURegisterVideoSurface
+ */
+ CUresult CUDAAPI cuGraphicsVDPAURegisterVideoSurface(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags);
+
+ /**
+ * \brief Registers a VDPAU VdpOutputSurface object
+ *
+ * Registers the VdpOutputSurface specified by \p vdpSurface for access by
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
+ * The surface's intended usage is specified using \p flags, as follows:
+ *
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
+ * resource will be used. It is therefore assumed that this resource will be
+ * read from and written to by CUDA. This is the default value.
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY: Specifies that CUDA
+ * will not write to this resource.
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD: Specifies that
+ * CUDA will not read from this resource and will write over the
+ * entire contents of the resource, so none of the data previously
+ * stored in the resource will be preserved.
+ *
+ * The VdpOutputSurface is presented as an array of subresources that may be
+ * accessed using pointers returned by ::cuGraphicsSubResourceGetMappedArray.
+ * The exact number of valid \p arrayIndex values depends on the VDPAU surface
+ * format. The mapping is shown in the table below. \p mipLevel must be 0.
+ *
+ * \htmlonly
+ * <table>
+ * <tr><th>VdpRGBAFormat </th><th>arrayIndex</th><th>Size </th><th>Format </th><th>Content </th></tr>
+ * <tr><td>VDP_RGBA_FORMAT_B8G8R8A8 </td><td>0 </td><td>w x h</td><td>ARGB8 </td><td>Entire surface</td></tr>
+ * <tr><td>VDP_RGBA_FORMAT_R10G10B10A2</td><td>0 </td><td>w x h</td><td>A2BGR10</td><td>Entire surface</td></tr>
+ * </table>
+ * \endhtmlonly
+ *
+ * \latexonly
+ * \begin{tabular}{|l|l|l|l|l|}
+ * \hline
+ * VdpRGBAFormat & arrayIndex & Size & Format & Content \\
+ * \hline
+ * VDP\_RGBA\_FORMAT\_B8G8R8A8 & 0 & w x h & ARGB8 & Entire surface \\
+ * VDP\_RGBA\_FORMAT\_R10G10B10A2 & 0 & w x h & A2BGR10 & Entire surface \\
+ * \hline
+ * \end{tabular}
+ * \endlatexonly
+ *
+ * \param pCudaResource - Pointer to the returned object handle
+ * \param vdpSurface - The VdpOutputSurface to be registered
+ * \param flags - Map flags
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_HANDLE,
+ * ::CUDA_ERROR_ALREADY_MAPPED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * \notefnerr
+ *
+ * \sa ::cuCtxCreate, ::cuVDPAUCtxCreate,
+ * ::cuGraphicsVDPAURegisterVideoSurface, ::cuGraphicsUnregisterResource,
+ * ::cuGraphicsResourceSetMapFlags, ::cuGraphicsMapResources,
+ * ::cuGraphicsUnmapResources, ::cuGraphicsSubResourceGetMappedArray,
+ * ::cuVDPAUGetDevice,
+ * ::cudaGraphicsVDPAURegisterOutputSurface
+ */
+ CUresult CUDAAPI cuGraphicsVDPAURegisterOutputSurface(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags);
+
+ /** @} */ /* END CUDA_VDPAU */
+
+
+ #if defined(__CUDA_API_VERSION_INTERNAL)
+ #undef cuVDPAUCtxCreate
+
+ CUresult CUDAAPI cuVDPAUCtxCreate(CUcontext *pCtx, unsigned int flags, CUdevice device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
+ #endif /* __CUDA_API_VERSION_INTERNAL */
+
+ #ifdef __cplusplus
+ };
+ #endif
+
+ #endif
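
Taken together, the declarations above define the usual interop sequence: look up the CUDA device for a VdpDevice, create an interop context, register a surface, then map it and retrieve its subresource array. A brief sketch of that flow, assuming vdpDevice, vdpGetProcAddress and outputSurface (hypothetical names) come from an already-initialized VDPAU session; error checking is elided and this is not part of the header.

#include <vdpau/vdpau.h>
#include <cuda.h>
#include <cudaVDPAU.h>

static void read_output_surface(VdpDevice vdpDevice,
                                VdpGetProcAddress *vdpGetProcAddress,
                                VdpOutputSurface outputSurface)
{
    CUdevice dev;
    CUcontext ctx;
    CUgraphicsResource res;
    CUarray array;

    cuVDPAUGetDevice(&dev, vdpDevice, vdpGetProcAddress);
    cuVDPAUCtxCreate(&ctx, 0, dev, vdpDevice, vdpGetProcAddress);

    /* CUDA only reads here, so pass the READ_ONLY hint documented above. */
    cuGraphicsVDPAURegisterOutputSurface(&res, outputSurface,
                                         CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY);
    cuGraphicsMapResources(1, &res, 0);

    /* Per the table above, an output surface exposes a single subresource:
     * arrayIndex 0 with mipLevel 0 covers the entire surface. */
    cuGraphicsSubResourceGetMappedArray(&array, res, 0, 0);

    /* ... copy out of `array`, e.g. with cuMemcpyAtoH ... */

    cuGraphicsUnmapResources(1, &res, 0);
    cuGraphicsUnregisterResource(res);
    cuCtxDestroy(ctx);
}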
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudaVDPAUTypedefs.h ADDED
@@ -0,0 +1,90 @@
+ /*
+ * Copyright 2020-2021 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #ifndef CUDAVDPAUTYPEDEFS_H
+ #define CUDAVDPAUTYPEDEFS_H
+
+ // Dependent includes for cudavdpau.h
+ #include <vdpau/vdpau.h>
+
+ #include <cudaVDPAU.h>
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif // __cplusplus
+
+ /*
+ * Macros for the latest version for each driver function in cudaVDPAU.h
+ */
+ #define PFN_cuVDPAUGetDevice PFN_cuVDPAUGetDevice_v3010
+ #define PFN_cuVDPAUCtxCreate PFN_cuVDPAUCtxCreate_v3020
+ #define PFN_cuGraphicsVDPAURegisterVideoSurface PFN_cuGraphicsVDPAURegisterVideoSurface_v3010
+ #define PFN_cuGraphicsVDPAURegisterOutputSurface PFN_cuGraphicsVDPAURegisterOutputSurface_v3010
+
+
+ /**
+ * Type definitions for functions defined in cudaVDPAU.h
+ */
+ typedef CUresult (CUDAAPI *PFN_cuVDPAUGetDevice_v3010)(CUdevice_v1 *pDevice, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
75
+ typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3020)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
76
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterVideoSurface_v3010)(CUgraphicsResource *pCudaResource, VdpVideoSurface vdpSurface, unsigned int flags);
77
+ typedef CUresult (CUDAAPI *PFN_cuGraphicsVDPAURegisterOutputSurface_v3010)(CUgraphicsResource *pCudaResource, VdpOutputSurface vdpSurface, unsigned int flags);
78
+
79
+ /*
80
+ * Type definitions for older versioned functions in cudaVDPAU.h
81
+ */
82
+ #if defined(__CUDA_API_VERSION_INTERNAL)
83
+ typedef CUresult (CUDAAPI *PFN_cuVDPAUCtxCreate_v3010)(CUcontext *pCtx, unsigned int flags, CUdevice_v1 device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
84
+ #endif
85
+
86
+ #ifdef __cplusplus
87
+ }
88
+ #endif // __cplusplus
89
+
90
+ #endif // file guard
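
A hedged sketch of how these PFN typedefs are typically consumed (not part of the header), assuming a CUDA 11.3+ driver that exposes cuGetProcAddress; the loadVdpauEntryPoints helper name is illustrative. The PFN_cuVDPAUGetDevice macro above resolves to the latest versioned typedef, which matches what the driver hands back for the requested CUDA version.

#include <cuda.h>
#include <cudaVDPAUTypedefs.h>

/* PFN_cuVDPAUGetDevice expands to PFN_cuVDPAUGetDevice_v3010 above. */
static PFN_cuVDPAUGetDevice pfn_cuVDPAUGetDevice;

static CUresult loadVdpauEntryPoints(void)
{
    /* Ask the driver for the entry point matching the CUDA version this
       translation unit was built against (assumes cuGetProcAddress is
       available, i.e. CUDA 11.3 or newer). */
    return cuGetProcAddress("cuVDPAUGetDevice",
                            (void **)&pfn_cuVDPAUGetDevice,
                            CUDA_VERSION, CU_GET_PROC_ADDRESS_DEFAULT);
}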
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier.h ADDED
@@ -0,0 +1,211 @@
+ /*
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #ifndef _CUDA_AWBARRIER_H_
+ # define _CUDA_AWBARRIER_H_
+
+ # include "cuda_awbarrier_primitives.h"
+
+ # if !defined(_CUDA_AWBARRIER_ARCH_700_OR_LATER)
+ # error This file requires compute capability 7.0 or greater.
+ # endif
+
+ # if !defined(_CUDA_AWBARRIER_CPLUSPLUS_11_OR_LATER)
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
+ -std=c++11 compiler option.
+ # endif
+
+ _CUDA_AWBARRIER_BEGIN_NAMESPACE
+
+ class awbarrier {
+ public:
+     class arrival_token {
+     public:
+         arrival_token() = default;
+         ~arrival_token() = default;
+         _CUDA_AWBARRIER_QUALIFIER uint32_t pending_count() const;
+     private:
+         _CUDA_AWBARRIER_QUALIFIER arrival_token(uint64_t token);
+         uint64_t token;
+         friend awbarrier;
+     };
+     awbarrier() = default;
+     awbarrier(const awbarrier&) = delete;
+     awbarrier& operator=(const awbarrier&) = delete;
+     ~awbarrier() = default;
+
+     _CUDA_AWBARRIER_QUALIFIER arrival_token arrive();
+     _CUDA_AWBARRIER_QUALIFIER arrival_token arrive_and_drop();
+     _CUDA_AWBARRIER_QUALIFIER bool timed_wait(arrival_token token, uint32_t hint_cycles);
+     _CUDA_AWBARRIER_QUALIFIER void wait(arrival_token token);
+     _CUDA_AWBARRIER_QUALIFIER void arrive_and_wait();
+     _CUDA_AWBARRIER_STATIC_QUALIFIER __host__ constexpr uint32_t max();
+ private:
+     uint64_t barrier;
+     friend _CUDA_AWBARRIER_QUALIFIER void init(awbarrier* barrier, uint32_t expected_count);
+     friend _CUDA_AWBARRIER_QUALIFIER void inval(awbarrier* barrier);
+     friend class pipeline;
+ };
+
+ _CUDA_AWBARRIER_QUALIFIER
+ uint32_t awbarrier::arrival_token::pending_count() const
+ {
+     const uint32_t pending_count = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(this->token);
+     return (pending_count >> 15);
+ }
+
+ _CUDA_AWBARRIER_QUALIFIER
+ awbarrier::arrival_token::arrival_token(uint64_t token)
+     : token(token)
+ {
+ }
+
+ _CUDA_AWBARRIER_QUALIFIER
+ void init(awbarrier* barrier, uint32_t expected_count)
+ {
+     _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
+     _CUDA_AWBARRIER_ASSERT(expected_count > 0 && expected_count <= _CUDA_AWBARRIER_MAX_COUNT);
+
+     const uint32_t init_count = (expected_count << 15) + expected_count;
+
+     _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(&barrier->barrier, init_count);
+ }
+
+ _CUDA_AWBARRIER_QUALIFIER
+ void inval(awbarrier* barrier)
+ {
+     _CUDA_AWBARRIER_ASSERT(__isShared(barrier));
+
+     _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(&barrier->barrier);
+ }
+
+ _CUDA_AWBARRIER_QUALIFIER
+ awbarrier::arrival_token awbarrier::arrive()
+ {
+     _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
+
+     const uint32_t arrive_count = 1 << 15;
+     const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete<false>(&this->barrier, arrive_count);
+
+     (void)_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<false>(&this->barrier);
+
+     return arrival_token(token);
+ }
+
+ _CUDA_AWBARRIER_QUALIFIER
+ awbarrier::arrival_token awbarrier::arrive_and_drop()
+ {
+     _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
+
+     const uint32_t arrive_count = 1 << 15;
+     const uint64_t token = _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop_no_complete<true>(&this->barrier, arrive_count);
+
+     (void)_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop<true>(&this->barrier);
+
+     return arrival_token(token);
+ }
+
+ _CUDA_AWBARRIER_QUALIFIER
+ bool awbarrier::timed_wait(arrival_token token, uint32_t hint_cycles)
+ {
+     constexpr uint64_t max_busy_wait_cycles = 1024;
+     constexpr uint32_t max_sleep_ns = 1 << 20;
+
+     _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
+
+     if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) {
+         return true;
+     }
+
+     uint64_t start_cycles = clock64();
+     uint64_t elapsed_cycles = 0;
+     uint32_t sleep_ns = 32;
+     while (elapsed_cycles < hint_cycles) {
+         if (_CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(&this->barrier, token.token)) {
+             return true;
+         }
+
+         if (elapsed_cycles > max_busy_wait_cycles) {
+             __nanosleep(sleep_ns);
+             if (sleep_ns < max_sleep_ns) {
+                 sleep_ns *= 2;
+             }
+         }
+
+         elapsed_cycles = clock64() - start_cycles;
+     }
+
+     return false;
+ }
+
+ _CUDA_AWBARRIER_QUALIFIER
+ void awbarrier::wait(arrival_token token)
+ {
+     _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
+
+     while (!timed_wait(token, ~0u));
+ }
+
+ _CUDA_AWBARRIER_QUALIFIER
+ void awbarrier::arrive_and_wait()
+ {
+     _CUDA_AWBARRIER_ASSERT(__isShared(&this->barrier));
+
+     this->wait(this->arrive());
+ }
+
+ _CUDA_AWBARRIER_QUALIFIER __host__
+ constexpr uint32_t awbarrier::max()
+ {
+     return _CUDA_AWBARRIER_MAX_COUNT;
+ }
+
+ _CUDA_AWBARRIER_END_NAMESPACE
+
+ #endif /* !_CUDA_AWBARRIER_H_ */
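
A hedged device-side sketch of the barrier above (not part of the header), assuming the namespace macros expand to nvcuda::experimental as in CUDA 11 toolkits and compilation for sm_70 or later with -std=c++11. Each thread arrives once, performs independent work, then waits, so the work between arrive() and wait() overlaps with other threads still producing.

#include <cuda_awbarrier.h>

using namespace nvcuda::experimental;  // assumed expansion of the namespace macros

__global__ void split_phase_kernel()
{
    __shared__ awbarrier bar;

    if (threadIdx.x == 0) {
        init(&bar, blockDim.x);   // every thread of the block arrives once per phase
    }
    __syncthreads();

    /* ... phase 1: produce data in shared memory ... */

    awbarrier::arrival_token token = bar.arrive();

    /* ... independent work that does not touch the shared data ... */

    bar.wait(token);              // returns once all blockDim.x arrivals landed

    /* ... phase 2: consume the shared data ... */
}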
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_egl_interop.h ADDED
@@ -0,0 +1,642 @@
+ /*
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__CUDA_EGL_INTEROP_H__)
+ #define __CUDA_EGL_INTEROP_H__
+
+ #include "cuda_runtime_api.h"
+ #include "cuda_runtime.h"
+ #include "cudart_platform.h"
+ #include "EGL/egl.h"
+ #include "EGL/eglext.h"
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif /* __cplusplus */
+
+ /**
+ * \addtogroup CUDART_TYPES
+ * @{
+ */
+
+ /**
+ * Maximum number of planes per frame
+ */
+ #define CUDA_EGL_MAX_PLANES 3
+
+ /**
+ * CUDA EglFrame type - array or pointer
+ */
+ typedef enum cudaEglFrameType_enum
+ {
+     cudaEglFrameTypeArray = 0, /**< Frame type CUDA array */
+     cudaEglFrameTypePitch = 1, /**< Frame type CUDA pointer */
+ } cudaEglFrameType;
+
+ /**
+ * Resource location flags - sysmem or vidmem
+ *
+ * For CUDA context on iGPU, since video and system memory are equivalent -
+ * these flags will not have an effect on the execution.
+ *
+ * For CUDA context on dGPU, applications can use the flag ::cudaEglResourceLocationFlags
+ * to give a hint about the desired location.
+ *
+ * ::cudaEglResourceLocationSysmem - the frame data is made resident on the system memory
+ * to be accessed by CUDA.
+ *
+ * ::cudaEglResourceLocationVidmem - the frame data is made resident on the dedicated
+ * video memory to be accessed by CUDA.
+ *
+ * There may be an additional latency due to new allocation and data migration,
+ * if the frame is produced on a different memory.
+ */
+ typedef enum cudaEglResourceLocationFlags_enum {
+     cudaEglResourceLocationSysmem = 0x00, /**< Resource location sysmem */
+     cudaEglResourceLocationVidmem = 0x01, /**< Resource location vidmem */
+ } cudaEglResourceLocationFlags;
+
+ /**
+ * CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA_EGL interops.
+ */
+ typedef enum cudaEglColorFormat_enum {
+     cudaEglColorFormatYUV420Planar = 0, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatYUV420SemiPlanar = 1, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV420Planar. */
+     cudaEglColorFormatYUV422Planar = 2, /**< Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
+     cudaEglColorFormatYUV422SemiPlanar = 3, /**< Y, UV in two surfaces with VU byte ordering, width, height ratio same as YUV422Planar. */
+     cudaEglColorFormatARGB = 6, /**< R/G/B/A four channels in one surface with BGRA byte ordering. */
+     cudaEglColorFormatRGBA = 7, /**< R/G/B/A four channels in one surface with ABGR byte ordering. */
+     cudaEglColorFormatL = 8, /**< single luminance channel in one surface. */
+     cudaEglColorFormatR = 9, /**< single color channel in one surface. */
+     cudaEglColorFormatYUV444Planar = 10, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatYUV444SemiPlanar = 11, /**< Y, UV in two surfaces (UV as one surface) with VU byte ordering, width, height ratio same as YUV444Planar. */
+     cudaEglColorFormatYUYV422 = 12, /**< Y, U, V in one surface, interleaved as UYVY in one channel. */
+     cudaEglColorFormatUYVY422 = 13, /**< Y, U, V in one surface, interleaved as YUYV in one channel. */
+     cudaEglColorFormatABGR = 14, /**< R/G/B/A four channels in one surface with RGBA byte ordering. */
+     cudaEglColorFormatBGRA = 15, /**< R/G/B/A four channels in one surface with ARGB byte ordering. */
+     cudaEglColorFormatA = 16, /**< Alpha color format - one channel in one surface. */
+     cudaEglColorFormatRG = 17, /**< R/G color format - two channels in one surface with GR byte ordering */
+     cudaEglColorFormatAYUV = 18, /**< Y, U, V, A four channels in one surface, interleaved as VUYA. */
+     cudaEglColorFormatYVU444SemiPlanar = 19, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatYVU422SemiPlanar = 20, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
+     cudaEglColorFormatYVU420SemiPlanar = 21, /**< Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatY10V10U10_444SemiPlanar = 22, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatY10V10U10_420SemiPlanar = 23, /**< Y10, V10U10 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatY12V12U12_444SemiPlanar = 24, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatY12V12U12_420SemiPlanar = 25, /**< Y12, V12U12 in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatVYUY_ER = 26, /**< Extended Range Y, U, V in one surface, interleaved as YVYU in one channel. */
+     cudaEglColorFormatUYVY_ER = 27, /**< Extended Range Y, U, V in one surface, interleaved as YUYV in one channel. */
+     cudaEglColorFormatYUYV_ER = 28, /**< Extended Range Y, U, V in one surface, interleaved as UYVY in one channel. */
+     cudaEglColorFormatYVYU_ER = 29, /**< Extended Range Y, U, V in one surface, interleaved as VYUY in one channel. */
+     cudaEglColorFormatYUVA_ER = 31, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as AVUY. */
+     cudaEglColorFormatAYUV_ER = 32, /**< Extended Range Y, U, V, A four channels in one surface, interleaved as VUYA. */
+     cudaEglColorFormatYUV444Planar_ER = 33, /**< Extended Range Y, U, V in three surfaces, U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatYUV422Planar_ER = 34, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
+     cudaEglColorFormatYUV420Planar_ER = 35, /**< Extended Range Y, U, V in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatYUV444SemiPlanar_ER = 36, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatYUV422SemiPlanar_ER = 37, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
+     cudaEglColorFormatYUV420SemiPlanar_ER = 38, /**< Extended Range Y, UV in two surfaces (UV as one surface) with VU byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatYVU444Planar_ER = 39, /**< Extended Range Y, V, U in three surfaces, U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatYVU422Planar_ER = 40, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = Y height. */
+     cudaEglColorFormatYVU420Planar_ER = 41, /**< Extended Range Y, V, U in three surfaces, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatYVU444SemiPlanar_ER = 42, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatYVU422SemiPlanar_ER = 43, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = Y height. */
+     cudaEglColorFormatYVU420SemiPlanar_ER = 44, /**< Extended Range Y, VU in two surfaces (VU as one surface) with UV byte ordering, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatBayerRGGB = 45, /**< Bayer format - one channel in one surface with interleaved RGGB ordering. */
+     cudaEglColorFormatBayerBGGR = 46, /**< Bayer format - one channel in one surface with interleaved BGGR ordering. */
+     cudaEglColorFormatBayerGRBG = 47, /**< Bayer format - one channel in one surface with interleaved GRBG ordering. */
+     cudaEglColorFormatBayerGBRG = 48, /**< Bayer format - one channel in one surface with interleaved GBRG ordering. */
+     cudaEglColorFormatBayer10RGGB = 49, /**< Bayer10 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
+     cudaEglColorFormatBayer10BGGR = 50, /**< Bayer10 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
+     cudaEglColorFormatBayer10GRBG = 51, /**< Bayer10 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
+     cudaEglColorFormatBayer10GBRG = 52, /**< Bayer10 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
+     cudaEglColorFormatBayer12RGGB = 53, /**< Bayer12 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
+     cudaEglColorFormatBayer12BGGR = 54, /**< Bayer12 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
+     cudaEglColorFormatBayer12GRBG = 55, /**< Bayer12 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
+     cudaEglColorFormatBayer12GBRG = 56, /**< Bayer12 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
+     cudaEglColorFormatBayer14RGGB = 57, /**< Bayer14 format - one channel in one surface with interleaved RGGB ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
+     cudaEglColorFormatBayer14BGGR = 58, /**< Bayer14 format - one channel in one surface with interleaved BGGR ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
+     cudaEglColorFormatBayer14GRBG = 59, /**< Bayer14 format - one channel in one surface with interleaved GRBG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
+     cudaEglColorFormatBayer14GBRG = 60, /**< Bayer14 format - one channel in one surface with interleaved GBRG ordering. Out of 16 bits, 14 bits used 2 bits No-op. */
+     cudaEglColorFormatBayer20RGGB = 61, /**< Bayer20 format - one channel in one surface with interleaved RGGB ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
+     cudaEglColorFormatBayer20BGGR = 62, /**< Bayer20 format - one channel in one surface with interleaved BGGR ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
+     cudaEglColorFormatBayer20GRBG = 63, /**< Bayer20 format - one channel in one surface with interleaved GRBG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
+     cudaEglColorFormatBayer20GBRG = 64, /**< Bayer20 format - one channel in one surface with interleaved GBRG ordering. Out of 32 bits, 20 bits used 12 bits No-op. */
+     cudaEglColorFormatYVU444Planar = 65, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatYVU422Planar = 66, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height. */
+     cudaEglColorFormatYVU420Planar = 67, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatBayerIspRGGB = 68, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved RGGB ordering and mapped to opaque integer datatype. */
+     cudaEglColorFormatBayerIspBGGR = 69, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved BGGR ordering and mapped to opaque integer datatype. */
+     cudaEglColorFormatBayerIspGRBG = 70, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GRBG ordering and mapped to opaque integer datatype. */
+     cudaEglColorFormatBayerIspGBRG = 71, /**< Nvidia proprietary Bayer ISP format - one channel in one surface with interleaved GBRG ordering and mapped to opaque integer datatype. */
+     cudaEglColorFormatBayerBCCR = 72, /**< Bayer format - one channel in one surface with interleaved BCCR ordering. */
+     cudaEglColorFormatBayerRCCB = 73, /**< Bayer format - one channel in one surface with interleaved RCCB ordering. */
+     cudaEglColorFormatBayerCRBC = 74, /**< Bayer format - one channel in one surface with interleaved CRBC ordering. */
+     cudaEglColorFormatBayerCBRC = 75, /**< Bayer format - one channel in one surface with interleaved CBRC ordering. */
+     cudaEglColorFormatBayer10CCCC = 76, /**< Bayer10 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 10 bits used 6 bits No-op. */
+     cudaEglColorFormatBayer12BCCR = 77, /**< Bayer12 format - one channel in one surface with interleaved BCCR ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
+     cudaEglColorFormatBayer12RCCB = 78, /**< Bayer12 format - one channel in one surface with interleaved RCCB ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
+     cudaEglColorFormatBayer12CRBC = 79, /**< Bayer12 format - one channel in one surface with interleaved CRBC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
+     cudaEglColorFormatBayer12CBRC = 80, /**< Bayer12 format - one channel in one surface with interleaved CBRC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
+     cudaEglColorFormatBayer12CCCC = 81, /**< Bayer12 format - one channel in one surface with interleaved CCCC ordering. Out of 16 bits, 12 bits used 4 bits No-op. */
+     cudaEglColorFormatY = 82, /**< Color format for single Y plane. */
+     cudaEglColorFormatYUV420SemiPlanar_2020 = 83, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatYVU420SemiPlanar_2020 = 84, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatYUV420Planar_2020 = 85, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatYVU420Planar_2020 = 86, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatYUV420SemiPlanar_709 = 87, /**< Y, UV in two surfaces (UV as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatYVU420SemiPlanar_709 = 88, /**< Y, VU in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatYUV420Planar_709 = 89, /**< Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatYVU420Planar_709 = 90, /**< Y, V, U in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatY10V10U10_420SemiPlanar_709 = 91, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatY10V10U10_420SemiPlanar_2020 = 92, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatY10V10U10_422SemiPlanar_2020 = 93, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
+     cudaEglColorFormatY10V10U10_422SemiPlanar = 94, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
+     cudaEglColorFormatY10V10U10_422SemiPlanar_709 = 95, /**< Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = Y height. */
+     cudaEglColorFormatY_ER = 96, /**< Extended Range Color format for single Y plane. */
+     cudaEglColorFormatY_709_ER = 97, /**< Extended Range Color format for single Y plane. */
+     cudaEglColorFormatY10_ER = 98, /**< Extended Range Color format for single Y10 plane. */
+     cudaEglColorFormatY10_709_ER = 99, /**< Extended Range Color format for single Y10 plane. */
+     cudaEglColorFormatY12_ER = 100, /**< Extended Range Color format for single Y12 plane. */
+     cudaEglColorFormatY12_709_ER = 101, /**< Extended Range Color format for single Y12 plane. */
+     cudaEglColorFormatYUVA = 102, /**< Y, U, V, A four channels in one surface, interleaved as AVUY. */
+     cudaEglColorFormatYVYU = 104, /**< Y, U, V in one surface, interleaved as YVYU in one channel. */
+     cudaEglColorFormatVYUY = 105, /**< Y, U, V in one surface, interleaved as VYUY in one channel. */
+     cudaEglColorFormatY10V10U10_420SemiPlanar_ER = 106, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatY10V10U10_420SemiPlanar_709_ER = 107, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatY10V10U10_444SemiPlanar_ER = 108, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatY10V10U10_444SemiPlanar_709_ER = 109, /**< Extended Range Y10, V10U10 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatY12V12U12_420SemiPlanar_ER = 110, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatY12V12U12_420SemiPlanar_709_ER = 111, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = 1/2 Y width, U/V height = 1/2 Y height. */
+     cudaEglColorFormatY12V12U12_444SemiPlanar_ER = 112, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
+     cudaEglColorFormatY12V12U12_444SemiPlanar_709_ER = 113, /**< Extended Range Y12, V12U12 in two surfaces (VU as one surface) U/V width = Y width, U/V height = Y height. */
+ } cudaEglColorFormat;
+
+ /**
+ * CUDA EGL Plane Descriptor - structure defining each plane of a CUDA EGLFrame
+ */
+ typedef struct cudaEglPlaneDesc_st {
+     unsigned int width; /**< Width of plane */
+     unsigned int height; /**< Height of plane */
+     unsigned int depth; /**< Depth of plane */
+     unsigned int pitch; /**< Pitch of plane */
+     unsigned int numChannels; /**< Number of channels for the plane */
+     struct cudaChannelFormatDesc channelDesc; /**< Channel Format Descriptor */
+     unsigned int reserved[4]; /**< Reserved for future use */
+ } cudaEglPlaneDesc;
+
+ /**
+ * CUDA EGLFrame Descriptor - structure defining one frame of EGL.
+ *
+ * Each frame may contain one or more planes depending on whether the surface is Multiplanar or not.
+ * Each plane of EGLFrame is represented by ::cudaEglPlaneDesc which is defined as:
+ * \code
+ * typedef struct cudaEglPlaneDesc_st {
+ *     unsigned int width;
+ *     unsigned int height;
+ *     unsigned int depth;
+ *     unsigned int pitch;
+ *     unsigned int numChannels;
+ *     struct cudaChannelFormatDesc channelDesc;
+ *     unsigned int reserved[4];
+ * } cudaEglPlaneDesc;
+ * \endcode
+
+ */
+ typedef struct cudaEglFrame_st {
+     union {
+         cudaArray_t pArray[CUDA_EGL_MAX_PLANES]; /**< Array of CUDA arrays corresponding to each plane*/
+         struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES]; /**< Array of Pointers corresponding to each plane*/
+     } frame;
+     cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES]; /**< CUDA EGL Plane Descriptor ::cudaEglPlaneDesc*/
+     unsigned int planeCount; /**< Number of planes */
+     cudaEglFrameType frameType; /**< Array or Pitch */
+     cudaEglColorFormat eglColorFormat; /**< CUDA EGL Color Format*/
+ } cudaEglFrame;
+
263
+ /**
264
+ * CUDA EGLSream Connection
265
+ */
+ typedef struct CUeglStreamConnection_st *cudaEglStreamConnection;
+
+ /** @} */ /* END CUDART_TYPES */
+
+ /**
+ * \addtogroup CUDART_EGL EGL Interoperability
+ * This section describes the EGL interoperability functions of the CUDA
+ * runtime application programming interface.
+ *
+ * @{
+ */
+
+ /**
+ * \brief Registers an EGL image
+ *
+ * Registers the EGLImageKHR specified by \p image for access by
+ * CUDA. A handle to the registered object is returned as \p pCudaResource.
+ * Additional Mapping/Unmapping is not required for the registered resource and
+ * ::cudaGraphicsResourceGetMappedEglFrame can be directly called on the \p pCudaResource.
+ *
+ * The application will be responsible for synchronizing access to shared objects.
+ * The application must ensure that any pending operations which access the objects have completed
+ * before passing control to CUDA. This may be accomplished by issuing and waiting for
+ * the glFinish command on all GL contexts (for OpenGL and likewise for other APIs).
+ * The application will also be responsible for ensuring that any pending operation on the
+ * registered CUDA resource has completed prior to executing subsequent commands in other APIs
+ * accessing the same memory objects.
+ * This can be accomplished by calling cuCtxSynchronize or cuEventSynchronize (preferably).
+ *
+ * The surface's intended usage is specified using \p flags, as follows:
+ *
+ * - ::cudaGraphicsRegisterFlagsNone: Specifies no hints about how this
+ * resource will be used. It is therefore assumed that this resource will be
+ * read from and written to by CUDA. This is the default value.
+ * - ::cudaGraphicsRegisterFlagsReadOnly: Specifies that CUDA
+ * will not write to this resource.
+ * - ::cudaGraphicsRegisterFlagsWriteDiscard: Specifies that
+ * CUDA will not read from this resource and will write over the
+ * entire contents of the resource, so none of the data previously
+ * stored in the resource will be preserved.
+ *
+ * The EGLImageKHR is an object which can be used to create EGLImage target resource. It is defined as a void pointer.
+ * typedef void* EGLImageKHR
+ *
+ * \param pCudaResource - Pointer to the returned object handle
+ * \param image - An EGLImageKHR image which can be used to create target resource.
+ * \param flags - Map flags
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidResourceHandle,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown
+ *
+ * \sa
+ * ::cudaGraphicsUnregisterResource,
+ * ::cudaGraphicsResourceGetMappedEglFrame,
+ * ::cuGraphicsEGLRegisterImage
+ */
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsEGLRegisterImage(struct cudaGraphicsResource **pCudaResource, EGLImageKHR image, unsigned int flags);
+
+ /**
+ * \brief Connect CUDA to EGLStream as a consumer.
+ *
+ * Connect CUDA as a consumer to EGLStreamKHR specified by \p eglStream.
+ *
+ * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
+ * API to another.
+ *
+ * \param conn - Pointer to the returned connection handle
+ * \param eglStream - EGLStreamKHR handle
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown
+ *
+ * \sa
+ * ::cudaEGLStreamConsumerDisconnect,
+ * ::cudaEGLStreamConsumerAcquireFrame,
+ * ::cudaEGLStreamConsumerReleaseFrame,
+ * ::cuEGLStreamConsumerConnect
+ */
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnect(cudaEglStreamConnection *conn, EGLStreamKHR eglStream);
+
+ /**
+ * \brief Connect CUDA to EGLStream as a consumer with given flags.
+ *
+ * Connect CUDA as a consumer to EGLStreamKHR specified by \p eglStream with specified \p flags defined by
+ * ::cudaEglResourceLocationFlags.
+ *
+ * The flags specify whether the consumer wants to access frames from system memory or video memory.
+ * Default is ::cudaEglResourceLocationVidmem.
+ *
+ * \param conn - Pointer to the returned connection handle
+ * \param eglStream - EGLStreamKHR handle
+ * \param flags - Flags denote intended location - system or video.
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown
+ *
+ * \sa
+ * ::cudaEGLStreamConsumerDisconnect,
+ * ::cudaEGLStreamConsumerAcquireFrame,
+ * ::cudaEGLStreamConsumerReleaseFrame,
+ * ::cuEGLStreamConsumerConnectWithFlags
+ */
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerConnectWithFlags(cudaEglStreamConnection *conn, EGLStreamKHR eglStream, unsigned int flags);
+
+ /**
+ * \brief Disconnect CUDA as a consumer from EGLStream.
+ *
+ * Disconnect CUDA as a consumer from EGLStreamKHR.
+ *
+ * \param conn - Connection to disconnect.
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown
+ *
+ * \sa
+ * ::cudaEGLStreamConsumerConnect,
+ * ::cudaEGLStreamConsumerAcquireFrame,
+ * ::cudaEGLStreamConsumerReleaseFrame,
+ * ::cuEGLStreamConsumerDisconnect
+ */
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerDisconnect(cudaEglStreamConnection *conn);
+
+ /**
+ * \brief Acquire an image frame from the EGLStream with CUDA as a consumer.
+ *
+ * Acquire an image frame from EGLStreamKHR.
+ * ::cudaGraphicsResourceGetMappedEglFrame can be called on \p pCudaResource to get
+ * ::cudaEglFrame.
+ *
+ * \param conn - Connection on which to acquire
+ * \param pCudaResource - CUDA resource on which the EGLStream frame will be mapped for use.
+ * \param pStream - CUDA stream for synchronization and any data migrations
+ * implied by ::cudaEglResourceLocationFlags.
+ * \param timeout - Desired timeout in usec.
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown,
+ * ::cudaErrorLaunchTimeout
+ *
+ * \sa
+ * ::cudaEGLStreamConsumerConnect,
+ * ::cudaEGLStreamConsumerDisconnect,
+ * ::cudaEGLStreamConsumerReleaseFrame,
+ * ::cuEGLStreamConsumerAcquireFrame
+ */
+
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerAcquireFrame(cudaEglStreamConnection *conn,
+         cudaGraphicsResource_t *pCudaResource, cudaStream_t *pStream, unsigned int timeout);
+ /**
+ * \brief Releases the last frame acquired from the EGLStream.
+ *
+ * Release the acquired image frame specified by \p pCudaResource to EGLStreamKHR.
+ *
+ * \param conn - Connection on which to release
+ * \param pCudaResource - CUDA resource whose corresponding frame is to be released
+ * \param pStream - CUDA stream on which release will be done.
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown
+ *
+ * \sa
+ * ::cudaEGLStreamConsumerConnect,
+ * ::cudaEGLStreamConsumerDisconnect,
+ * ::cudaEGLStreamConsumerAcquireFrame,
+ * ::cuEGLStreamConsumerReleaseFrame
+ */
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamConsumerReleaseFrame(cudaEglStreamConnection *conn,
+         cudaGraphicsResource_t pCudaResource, cudaStream_t *pStream);
+
+ /**
+ * \brief Connect CUDA to EGLStream as a producer.
+ *
+ * Connect CUDA as a producer to EGLStreamKHR specified by \p eglStream.
+ *
+ * The EGLStreamKHR is an EGL object that transfers a sequence of image frames from one
+ * API to another.
+ *
+ * \param conn - Pointer to the returned connection handle
+ * \param eglStream - EGLStreamKHR handle
+ * \param width - width of the image to be submitted to the stream
+ * \param height - height of the image to be submitted to the stream
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown
+ *
+ * \sa
+ * ::cudaEGLStreamProducerDisconnect,
+ * ::cudaEGLStreamProducerPresentFrame,
+ * ::cudaEGLStreamProducerReturnFrame,
+ * ::cuEGLStreamProducerConnect
+ */
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerConnect(cudaEglStreamConnection *conn,
+         EGLStreamKHR eglStream, EGLint width, EGLint height);
+
+ /**
+ * \brief Disconnect CUDA as a producer from EGLStream.
+ *
+ * Disconnect CUDA as a producer from EGLStreamKHR.
+ *
+ * \param conn - Connection to disconnect.
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown
+ *
+ * \sa
+ * ::cudaEGLStreamProducerConnect,
+ * ::cudaEGLStreamProducerPresentFrame,
+ * ::cudaEGLStreamProducerReturnFrame,
+ * ::cuEGLStreamProducerDisconnect
+ */
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerDisconnect(cudaEglStreamConnection *conn);
+
+ /**
+ * \brief Present a CUDA eglFrame to the EGLStream with CUDA as a producer.
+ *
+ * The ::cudaEglFrame is defined as:
+ * \code
+ * typedef struct cudaEglFrame_st {
+ *     union {
+ *         cudaArray_t pArray[CUDA_EGL_MAX_PLANES];
+ *         struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES];
+ *     } frame;
+ *     cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES];
+ *     unsigned int planeCount;
+ *     cudaEglFrameType frameType;
+ *     cudaEglColorFormat eglColorFormat;
+ * } cudaEglFrame;
+ * \endcode
+ *
+ * For ::cudaEglFrame of type ::cudaEglFrameTypePitch, the application may present a sub-region of a memory
+ * allocation. In that case, ::cudaPitchedPtr::ptr will specify the start address of the sub-region in
+ * the allocation and ::cudaEglPlaneDesc will specify the dimensions of the sub-region.
+ *
+ * \param conn - Connection on which to present the CUDA array
+ * \param eglframe - CUDA EGLStream Producer Frame handle to be sent to the consumer over EGLStream.
+ * \param pStream - CUDA stream on which to present the frame.
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown
+ *
+ * \sa
+ * ::cudaEGLStreamProducerConnect,
+ * ::cudaEGLStreamProducerDisconnect,
+ * ::cudaEGLStreamProducerReturnFrame,
+ * ::cuEGLStreamProducerPresentFrame
+ */
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerPresentFrame(cudaEglStreamConnection *conn,
+         cudaEglFrame eglframe, cudaStream_t *pStream);
+
+ /**
+ * \brief Return the CUDA eglFrame to the EGLStream last released by the consumer.
+ *
+ * This API can potentially return cudaErrorLaunchTimeout if the consumer has not
+ * returned a frame to the EGL stream. If timeout is returned, the application can retry.
+ *
+ * \param conn - Connection on which to return the CUDA array
+ * \param eglframe - CUDA EGLStream Producer Frame handle returned from the consumer over EGLStream.
+ * \param pStream - CUDA stream on which to return the frame.
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorLaunchTimeout,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown
+ *
+ * \sa
+ * ::cudaEGLStreamProducerConnect,
+ * ::cudaEGLStreamProducerDisconnect,
+ * ::cudaEGLStreamProducerPresentFrame,
+ * ::cuEGLStreamProducerReturnFrame
+ */
+ extern __host__ cudaError_t CUDARTAPI cudaEGLStreamProducerReturnFrame(cudaEglStreamConnection *conn,
+         cudaEglFrame *eglframe, cudaStream_t *pStream);
+
+ /**
+ * \brief Get an eglFrame through which to access a registered EGL graphics resource.
+ *
+ * Returns in \p *eglFrame an eglFrame pointer through which the registered graphics resource
+ * \p resource may be accessed.
+ * This API can only be called for EGL graphics resources.
+ *
+ * The ::cudaEglFrame is defined as
+ * \code
+ * typedef struct cudaEglFrame_st {
+ *     union {
+ *         cudaArray_t pArray[CUDA_EGL_MAX_PLANES];
+ *         struct cudaPitchedPtr pPitch[CUDA_EGL_MAX_PLANES];
+ *     } frame;
+ *     cudaEglPlaneDesc planeDesc[CUDA_EGL_MAX_PLANES];
+ *     unsigned int planeCount;
+ *     cudaEglFrameType frameType;
+ *     cudaEglColorFormat eglColorFormat;
+ * } cudaEglFrame;
+ * \endcode
+ *
+ *
+ * \param eglFrame - Returned eglFrame.
+ * \param resource - Registered resource to access.
+ * \param index - Index for cubemap surfaces.
+ * \param mipLevel - Mipmap level for the subresource to access.
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorUnknown
+ *
+ * \note Note that in the case of a multiplanar \p *eglFrame, only the pitch of the first plane (unsigned int cudaEglPlaneDesc::pitch) is to be considered by the application.
+ *
+ * \sa
+ * ::cudaGraphicsSubResourceGetMappedArray,
+ * ::cudaGraphicsResourceGetMappedPointer,
+ * ::cuGraphicsResourceGetMappedEglFrame
+ */
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsResourceGetMappedEglFrame(cudaEglFrame* eglFrame,
+         cudaGraphicsResource_t resource, unsigned int index, unsigned int mipLevel);
+
+ /**
+ * \brief Creates an event from EGLSync object
+ *
+ * Creates an event *phEvent from an EGLSyncKHR eglSync with the flags specified
+ * via \p flags. Valid flags include:
+ * - ::cudaEventDefault: Default event creation flag.
+ * - ::cudaEventBlockingSync: Specifies that the created event should use blocking
+ * synchronization. A CPU thread that uses ::cudaEventSynchronize() to wait on
+ * an event created with this flag will block until the event has actually
+ * been completed.
+ *
+ * ::cudaEventRecord and TimingData are not supported for events created from EGLSync.
+ *
+ * The EGLSyncKHR is an opaque handle to an EGL sync object.
+ * typedef void* EGLSyncKHR
+ *
+ * \param phEvent - Returns newly created event
+ * \param eglSync - Opaque handle to EGLSync object
+ * \param flags - Event creation flags
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInitializationError,
+ * ::cudaErrorInvalidValue,
+ * ::cudaErrorLaunchFailure,
+ * ::cudaErrorMemoryAllocation
+ *
+ * \sa
+ * ::cudaEventQuery,
+ * ::cudaEventSynchronize,
+ * ::cudaEventDestroy
+ */
+ extern __host__ cudaError_t CUDARTAPI cudaEventCreateFromEGLSync(cudaEvent_t *phEvent, EGLSyncKHR eglSync, unsigned int flags);
+
+ /** @} */ /* END CUDART_EGL */
+
+ #if defined(__cplusplus)
+ }
+ #endif /* __cplusplus */
+
+ #endif /* __CUDA_EGL_INTEROP_H__ */
+
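
A minimal consumer-side sketch tying the stream APIs above together (not part of the header). It assumes eglStream is a valid EGLStreamKHR with a producer attached; the consume_frames helper name is illustrative and error handling is abbreviated.

static void consume_frames(EGLStreamKHR eglStream)
{
    cudaEglStreamConnection conn;
    cudaGraphicsResource_t resource;
    cudaStream_t stream = 0;
    cudaEglFrame frame;

    if (cudaEGLStreamConsumerConnect(&conn, eglStream) != cudaSuccess)
        return;

    /* Wait up to 16 ms per frame (the timeout parameter is in usec). */
    while (cudaEGLStreamConsumerAcquireFrame(&conn, &resource, &stream,
                                             16000) == cudaSuccess) {
        /* index = 0, mipLevel = 0 retrieves the whole frame descriptor. */
        if (cudaGraphicsResourceGetMappedEglFrame(&frame, resource, 0, 0) == cudaSuccess) {
            /* ... launch work on frame.frame.pArray / frame.frame.pPitch
               according to frame.frameType and frame.planeCount ... */
        }
        cudaEGLStreamConsumerReleaseFrame(&conn, resource, &stream);
    }

    cudaEGLStreamConsumerDisconnect(&conn);
}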
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_occupancy.h ADDED
@@ -0,0 +1,1929 @@
1
+ /*
2
+ * Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /**
51
+ * CUDA Occupancy Calculator
52
+ *
53
+ * NAME
54
+ *
55
+ * cudaOccMaxActiveBlocksPerMultiprocessor,
56
+ * cudaOccMaxPotentialOccupancyBlockSize,
57
+ * cudaOccMaxPotentialOccupancyBlockSizeVariableSMem,
58
+ * cudaOccAvailableDynamicSMemPerBlock
59
+ *
60
+ * DESCRIPTION
61
+ *
62
+ * The CUDA occupancy calculator provides a standalone, programmatic
63
+ * interface to compute the occupancy of a function on a device. It can also
64
+ * provide occupancy-oriented launch configuration suggestions.
65
+ *
66
+ * The function and device are defined by the user through
67
+ * cudaOccFuncAttributes, cudaOccDeviceProp, and cudaOccDeviceState
68
+ * structures. All APIs require all 3 of them.
69
+ *
70
+ * See the structure definition for more details about the device / function
71
+ * descriptors.
72
+ *
73
+ * See each API's prototype for API usage.
74
+ *
75
+ * COMPATIBILITY
76
+ *
77
+ * The occupancy calculator will be updated on each major CUDA toolkit
78
+ * release. It does not provide forward compatibility, i.e. new hardware
79
+ * released after this implementation's release will not be supported.
80
+ *
81
+ * NOTE
82
+ *
83
+ * If there is access to CUDA runtime, and the sole intent is to calculate
84
+ * occupancy-related values on one of the accessible CUDA devices, using CUDA
85
+ * runtime's occupancy calculation APIs is recommended.
86
+ *
87
+ */
88
+
89
+ #ifndef __cuda_occupancy_h__
90
+ #define __cuda_occupancy_h__
91
+
92
+ #include <stddef.h>
93
+ #include <limits.h>
94
+ #include <string.h>
95
+
96
+
97
+ // __OCC_INLINE will be undefined at the end of this header
98
+ //
99
+ #ifdef __CUDACC__
100
+ #define __OCC_INLINE inline __host__ __device__
101
+ #elif defined _MSC_VER
102
+ #define __OCC_INLINE __inline
103
+ #else // GNUCC assumed
104
+ #define __OCC_INLINE inline
105
+ #endif
106
+
107
+ enum cudaOccError_enum {
108
+ CUDA_OCC_SUCCESS = 0, // no error encountered
109
+ CUDA_OCC_ERROR_INVALID_INPUT = 1, // input parameter is invalid
110
+ CUDA_OCC_ERROR_UNKNOWN_DEVICE = 2, // requested device is not supported in
111
+ // current implementation or device is
112
+ // invalid
113
+ };
114
+ typedef enum cudaOccError_enum cudaOccError;
115
+
116
+ typedef struct cudaOccResult cudaOccResult;
117
+ typedef struct cudaOccDeviceProp cudaOccDeviceProp;
118
+ typedef struct cudaOccFuncAttributes cudaOccFuncAttributes;
119
+ typedef struct cudaOccDeviceState cudaOccDeviceState;
120
+
121
+ /**
122
+ * The CUDA occupancy calculator computes the occupancy of the function
123
+ * described by attributes with the given block size (blockSize), static device
124
+ * properties (properties), dynamic device states (states) and per-block dynamic
125
+ * shared memory allocation (dynamicSMemSize) in bytes, and outputs it through
126
+ * result along with other useful information. The occupancy is computed in
127
+ * terms of the maximum number of active blocks per multiprocessor. The user can
128
+ * then convert it to other metrics, such as number of active warps.
129
+ *
130
+ * RETURN VALUE
131
+ *
132
+ * The occupancy and related information are returned through result.
133
+ *
134
+ * If result->activeBlocksPerMultiprocessor is 0, then the given parameter
135
+ * combination cannot run on the device.
136
+ *
137
+ * ERRORS
138
+ *
139
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
140
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
141
+ * current implementation or device is invalid
142
+ */
143
+ static __OCC_INLINE
144
+ cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor(
145
+ cudaOccResult *result, // out
146
+ const cudaOccDeviceProp *properties, // in
147
+ const cudaOccFuncAttributes *attributes, // in
148
+ const cudaOccDeviceState *state, // in
149
+ int blockSize, // in
150
+ size_t dynamicSmemSize); // in
151
+
152
+ /**
153
+ * The CUDA launch configurator C API suggests a grid / block size pair (in
154
+ * minGridSize and blockSize) that achieves the best potential occupancy
155
+ * (i.e. maximum number of active warps with the smallest number of blocks) for
156
+ * the given function described by attributes, on a device described by
157
+ * properties with settings in state.
158
+ *
159
+ * If per-block dynamic shared memory allocation is not needed, the user should
160
+ * set blockSizeToDynamicSMemSize to NULL and dynamicSMemSize to 0.
161
+ *
162
+ * If per-block dynamic shared memory allocation is needed, then if the dynamic
163
+ * shared memory size is constant regardless of block size, the size should be
164
+ * passed through dynamicSMemSize, and blockSizeToDynamicSMemSize should be
165
+ * NULL.
166
+ *
167
+ * Otherwise, if the per-block dynamic shared memory size varies with different
168
+ * block sizes, the user needs to provide a pointer to a unary function through
169
+ * blockSizeToDynamicSMemSize that computes the dynamic shared memory needed by
170
+ * a block of the function for any given block size. dynamicSMemSize is
171
+ * ignored. An example signature is:
172
+ *
173
+ * // Take block size, returns dynamic shared memory needed
174
+ * size_t blockToSmem(int blockSize);
175
+ *
176
+ * RETURN VALUE
177
+ *
178
+ * The suggested block size and the minimum number of blocks needed to achieve
179
+ * the maximum occupancy are returned through blockSize and minGridSize.
180
+ *
181
+ * If *blockSize is 0, then the given combination cannot run on the device.
182
+ *
183
+ * ERRORS
184
+ *
185
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
186
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
187
+ * current implementation or device is invalid
188
+ *
189
+ */
190
+ static __OCC_INLINE
191
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
192
+ int *minGridSize, // out
193
+ int *blockSize, // out
194
+ const cudaOccDeviceProp *properties, // in
195
+ const cudaOccFuncAttributes *attributes, // in
196
+ const cudaOccDeviceState *state, // in
197
+ size_t (*blockSizeToDynamicSMemSize)(int), // in
198
+ size_t dynamicSMemSize); // in
199
+
200
+ /**
201
+ * The CUDA launch configurator C++ API suggests a grid / block size pair (in
202
+ * minGridSize and blockSize) that achieves the best potential occupancy
203
+ * (i.e. the maximum number of active warps with the smallest number of blocks)
204
+ * for the given function described by attributes, on a device described by
205
+ * properties with settings in state.
206
+ *
207
+ * If per-block dynamic shared memory allocation is 0 or constant regardless of
208
+ * block size, the user can use cudaOccMaxPotentialOccupancyBlockSize to
209
+ * configure the launch. A constant dynamic shared memory allocation size in
210
+ * bytes can be passed through dynamicSMemSize.
211
+ *
212
+ * Otherwise, if the per-block dynamic shared memory size varies with different
213
+ * block sizes, the user needs to use
214
+ * cudaOccMaxPotentialOccupancyBlockSizeVariableSMem instead, and provide a
215
+ * functor / pointer to a unary function (blockSizeToDynamicSMemSize) that
216
+ * computes the dynamic shared memory needed by func for any given block
217
+ * size. An example signature is:
218
+ *
219
+ * // Take block size, returns per-block dynamic shared memory needed
220
+ * size_t blockToSmem(int blockSize);
221
+ *
222
+ * RETURN VALUE
223
+ *
224
+ * The suggested block size and the minimum number of blocks needed to achieve
225
+ * the maximum occupancy are returned through blockSize and minGridSize.
226
+ *
227
+ * If *blockSize is 0, then the given combination cannot run on the device.
228
+ *
229
+ * ERRORS
230
+ *
231
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
232
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
233
+ * current implementation or device is invalid
234
+ *
235
+ */
236
+
237
+ #if defined(__cplusplus)
238
+ namespace {
239
+
240
+ __OCC_INLINE
241
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
242
+ int *minGridSize, // out
243
+ int *blockSize, // out
244
+ const cudaOccDeviceProp *properties, // in
245
+ const cudaOccFuncAttributes *attributes, // in
246
+ const cudaOccDeviceState *state, // in
247
+ size_t dynamicSMemSize = 0); // in
248
+
249
+ template <typename UnaryFunction>
250
+ __OCC_INLINE
251
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem(
252
+ int *minGridSize, // out
253
+ int *blockSize, // out
254
+ const cudaOccDeviceProp *properties, // in
255
+ const cudaOccFuncAttributes *attributes, // in
256
+ const cudaOccDeviceState *state, // in
257
+ UnaryFunction blockSizeToDynamicSMemSize); // in
258
+
259
+ } // namespace anonymous
260
+ #endif // defined(__cplusplus)
261
+
262
+ /**
263
+ *
264
+ * The CUDA dynamic shared memory calculator computes the maximum size of
265
+ * per-block dynamic shared memory if we want to place numBlocks blocks
266
+ * on an SM.
267
+ *
268
+ * RETURN VALUE
269
+ *
270
+ * Returns in *dynamicSmemSize the maximum size of dynamic shared memory to allow
271
+ * numBlocks blocks per SM.
272
+ *
273
+ * ERRORS
274
+ *
275
+ * CUDA_OCC_ERROR_INVALID_INPUT input parameter is invalid.
276
+ * CUDA_OCC_ERROR_UNKNOWN_DEVICE requested device is not supported in
277
+ * current implementation or device is invalid
278
+ *
279
+ */
280
+ static __OCC_INLINE
281
+ cudaOccError cudaOccAvailableDynamicSMemPerBlock(
282
+ size_t *dynamicSmemSize,
283
+ const cudaOccDeviceProp *properties,
284
+ const cudaOccFuncAttributes *attributes,
285
+ const cudaOccDeviceState *state,
286
+ int numBlocks,
287
+ int blockSize);
288
+
289
+ /**
290
+ * Data structures
291
+ *
292
+ * These structures are subject to change for future architectures and CUDA
293
+ * releases. C users should initialize the structure as {0}.
294
+ *
295
+ */
296
+
297
+ /**
298
+ * Device descriptor
299
+ *
300
+ * This structure describes a device.
301
+ */
302
+ struct cudaOccDeviceProp {
303
+ int computeMajor; // Compute capability major version
304
+ int computeMinor; // Compute capability minor
305
+ // version. An unsupported minor version
306
+ // may cause an error
307
+ int maxThreadsPerBlock; // Maximum number of threads per block
308
+ int maxThreadsPerMultiprocessor; // Maximum number of threads per SM
309
+ // i.e. (Max. number of warps) x (warp
310
+ // size)
311
+ int regsPerBlock; // Maximum number of registers per block
312
+ int regsPerMultiprocessor; // Maximum number of registers per SM
313
+ int warpSize; // Warp size
314
+ size_t sharedMemPerBlock; // Maximum shared memory size per block
315
+ size_t sharedMemPerMultiprocessor; // Maximum shared memory size per SM
316
+ int numSms; // Number of SMs available
317
+ size_t sharedMemPerBlockOptin; // Maximum optin shared memory size per block
318
+ size_t reservedSharedMemPerBlock; // Shared memory per block reserved by driver
319
+
320
+ #ifdef __cplusplus
321
+ // This structure can be converted from a cudaDeviceProp structure for users
322
+ // that use this header in their CUDA applications.
323
+ //
324
+ // If the application has access to the CUDA Runtime API, the application
325
+ // can obtain the device properties of a CUDA device through
326
+ // cudaGetDeviceProperties, and initialize a cudaOccDeviceProp with the
327
+ // cudaDeviceProp structure.
328
+ //
329
+ // Example:
330
+ /*
331
+ {
332
+ cudaDeviceProp prop;
333
+
334
+ cudaGetDeviceProperties(&prop, ...);
335
+
336
+ cudaOccDeviceProp occProp = prop;
337
+
338
+ ...
339
+
340
+ cudaOccMaxPotentialOccupancyBlockSize(..., &occProp, ...);
341
+ }
342
+ */
343
+ //
344
+ template<typename DeviceProp>
345
+ __OCC_INLINE
346
+ cudaOccDeviceProp(const DeviceProp &props)
347
+ : computeMajor (props.major),
348
+ computeMinor (props.minor),
349
+ maxThreadsPerBlock (props.maxThreadsPerBlock),
350
+ maxThreadsPerMultiprocessor (props.maxThreadsPerMultiProcessor),
351
+ regsPerBlock (props.regsPerBlock),
352
+ regsPerMultiprocessor (props.regsPerMultiprocessor),
353
+ warpSize (props.warpSize),
354
+ sharedMemPerBlock (props.sharedMemPerBlock),
355
+ sharedMemPerMultiprocessor (props.sharedMemPerMultiprocessor),
356
+ numSms (props.multiProcessorCount),
357
+ sharedMemPerBlockOptin (props.sharedMemPerBlockOptin),
358
+ reservedSharedMemPerBlock (props.reservedSharedMemPerBlock)
359
+ {}
360
+
361
+ __OCC_INLINE
362
+ cudaOccDeviceProp()
363
+ : computeMajor (0),
364
+ computeMinor (0),
365
+ maxThreadsPerBlock (0),
366
+ maxThreadsPerMultiprocessor (0),
367
+ regsPerBlock (0),
368
+ regsPerMultiprocessor (0),
369
+ warpSize (0),
370
+ sharedMemPerBlock (0),
371
+ sharedMemPerMultiprocessor (0),
372
+ numSms (0),
373
+ sharedMemPerBlockOptin (0),
374
+ reservedSharedMemPerBlock (0)
375
+ {}
376
+ #endif // __cplusplus
377
+ };
378
+
379
+ /**
380
+ * Partitioned global caching option
381
+ */
382
+ typedef enum cudaOccPartitionedGCConfig_enum {
383
+ PARTITIONED_GC_OFF, // Disable partitioned global caching
384
+ PARTITIONED_GC_ON, // Prefer partitioned global caching
385
+ PARTITIONED_GC_ON_STRICT // Force partitioned global caching
386
+ } cudaOccPartitionedGCConfig;
387
+
388
+ /**
389
+ * Per-function opt-in maximum dynamic shared memory limit
390
+ */
391
+ typedef enum cudaOccFuncShmemConfig_enum {
392
+ FUNC_SHMEM_LIMIT_DEFAULT, // Default shmem limit
393
+ FUNC_SHMEM_LIMIT_OPTIN, // Use the optin shmem limit
394
+ } cudaOccFuncShmemConfig;
395
+
396
+ /**
397
+ * Function descriptor
398
+ *
399
+ * This structure describes a CUDA function.
400
+ */
401
+ struct cudaOccFuncAttributes {
402
+ int maxThreadsPerBlock; // Maximum block size the function can work with. If
403
+ // unlimited, use INT_MAX or any value greater than
404
+ // or equal to maxThreadsPerBlock of the device
405
+ int numRegs; // Number of registers used. When the function is
406
+ // launched on device, the register count may change
407
+ // due to internal tools requirements.
408
+ size_t sharedSizeBytes; // Size of static shared memory used, in bytes
409
+
410
+ cudaOccPartitionedGCConfig partitionedGCConfig;
411
+ // Partitioned global caching is required to enable
412
+ // caching on certain chips, such as sm_52
413
+ // devices. Partitioned global caching can be
414
+ // automatically disabled if the occupancy
415
+ // requirement of the launch cannot support caching.
416
+ //
417
+ // To override this behavior with caching on and
418
+ // calculate occupancy strictly according to the
419
+ // preference, set partitionedGCConfig to
420
+ // PARTITIONED_GC_ON_STRICT. This is especially
421
+ // useful for experimenting and finding launch
422
+ // configurations (MaxPotentialOccupancyBlockSize)
423
+ // that allow global caching to take effect.
424
+ //
425
+ // This flag only affects the occupancy calculation.
426
+
427
+ cudaOccFuncShmemConfig shmemLimitConfig;
428
+ // Certain chips like sm_70 allow a user to opt into
429
+ // a higher per-block limit of dynamic shared memory.
430
+ // This opt-in is performed on a per-function basis
431
+ // using the cuFuncSetAttribute function.
432
+
433
+ size_t maxDynamicSharedSizeBytes;
434
+ // User set limit on maximum dynamic shared memory
435
+ // usable by the kernel
436
+ // This limit is set using the cuFuncSetAttribute
437
+ // function.
438
+ #ifdef __cplusplus
439
+ // This structure can be converted from a cudaFuncAttributes structure for
440
+ // users that use this header in their CUDA applications.
441
+ //
442
+ // If the application has access to the CUDA Runtime API, the application
443
+ // can obtain the function attributes of a CUDA kernel function through
444
+ // cudaFuncGetAttributes, and initialize a cudaOccFuncAttributes with the
445
+ // cudaFuncAttributes structure.
446
+ //
447
+ // Example:
448
+ /*
449
+ __global__ void foo() {...}
450
+
451
+ ...
452
+
453
+ {
454
+ cudaFuncAttributes attr;
455
+
456
+ cudaFuncGetAttributes(&attr, foo);
457
+
458
+ cudaOccFuncAttributes occAttr = attr;
459
+
460
+ ...
461
+
462
+ cudaOccMaxPotentialOccupancyBlockSize(..., &occAttr, ...);
463
+ }
464
+ */
465
+ //
466
+ template<typename FuncAttributes>
467
+ __OCC_INLINE
468
+ cudaOccFuncAttributes(const FuncAttributes &attr)
469
+ : maxThreadsPerBlock (attr.maxThreadsPerBlock),
470
+ numRegs (attr.numRegs),
471
+ sharedSizeBytes (attr.sharedSizeBytes),
472
+ partitionedGCConfig (PARTITIONED_GC_OFF),
473
+ shmemLimitConfig (FUNC_SHMEM_LIMIT_OPTIN),
474
+ maxDynamicSharedSizeBytes (attr.maxDynamicSharedSizeBytes)
475
+ {}
476
+
477
+ __OCC_INLINE
478
+ cudaOccFuncAttributes()
479
+ : maxThreadsPerBlock (0),
480
+ numRegs (0),
481
+ sharedSizeBytes (0),
482
+ partitionedGCConfig (PARTITIONED_GC_OFF),
483
+ shmemLimitConfig (FUNC_SHMEM_LIMIT_DEFAULT),
484
+ maxDynamicSharedSizeBytes (0)
485
+ {}
486
+ #endif
487
+ };
488
+
489
+ typedef enum cudaOccCacheConfig_enum {
490
+ CACHE_PREFER_NONE = 0x00, // no preference for shared memory or L1 (default)
491
+ CACHE_PREFER_SHARED = 0x01, // prefer larger shared memory and smaller L1 cache
492
+ CACHE_PREFER_L1 = 0x02, // prefer larger L1 cache and smaller shared memory
493
+ CACHE_PREFER_EQUAL = 0x03 // prefer equal sized L1 cache and shared memory
494
+ } cudaOccCacheConfig;
495
+
496
+ typedef enum cudaOccCarveoutConfig_enum {
497
+ SHAREDMEM_CARVEOUT_DEFAULT = -1, // no preference for shared memory or L1 (default)
498
+ SHAREDMEM_CARVEOUT_MAX_SHARED = 100, // prefer maximum available shared memory, minimum L1 cache
499
+ SHAREDMEM_CARVEOUT_MAX_L1 = 0, // prefer maximum available L1 cache, minimum shared memory
500
+ SHAREDMEM_CARVEOUT_HALF = 50 // prefer half of maximum available shared memory, with the rest as L1 cache
501
+ } cudaOccCarveoutConfig;
502
+
503
+ /**
504
+ * Device state descriptor
505
+ *
506
+ * This structure describes device settings that affect occupancy calculation.
507
+ */
508
+ struct cudaOccDeviceState
509
+ {
510
+ // Cache / shared memory split preference. Deprecated on Volta
511
+ cudaOccCacheConfig cacheConfig;
512
+ // Shared memory / L1 split preference. Supported only on Volta and newer
513
+ int carveoutConfig;
514
+
515
+ #ifdef __cplusplus
516
+ __OCC_INLINE
517
+ cudaOccDeviceState()
518
+ : cacheConfig (CACHE_PREFER_NONE),
519
+ carveoutConfig (SHAREDMEM_CARVEOUT_DEFAULT)
520
+ {}
521
+ #endif
522
+ };
523
+
524
+ typedef enum cudaOccLimitingFactor_enum {
525
+ // Occupancy limited due to:
526
+ OCC_LIMIT_WARPS = 0x01, // - warps available
527
+ OCC_LIMIT_REGISTERS = 0x02, // - registers available
528
+ OCC_LIMIT_SHARED_MEMORY = 0x04, // - shared memory available
529
+ OCC_LIMIT_BLOCKS = 0x08 // - blocks available
530
+ } cudaOccLimitingFactor;
531
+
532
+ /**
533
+ * Occupancy output
534
+ *
535
+ * This structure contains occupancy calculator's output.
536
+ */
537
+ struct cudaOccResult {
538
+ int activeBlocksPerMultiprocessor; // Occupancy
539
+ unsigned int limitingFactors; // Factors that limited occupancy. A bit
540
+ // field that records the limiting
541
+ // factors, see cudaOccLimitingFactor
542
+ int blockLimitRegs; // Occupancy due to register
543
+ // usage, INT_MAX if the kernel does not
544
+ // use any register.
545
+ int blockLimitSharedMem; // Occupancy due to shared memory
546
+ // usage, INT_MAX if the kernel does not
547
+ // use shared memory.
548
+ int blockLimitWarps; // Occupancy due to block size limit
549
+ int blockLimitBlocks; // Occupancy due to maximum number of blocks
550
+ // manageable per SM
551
+ int allocatedRegistersPerBlock; // Actual number of registers allocated per
552
+ // block
553
+ size_t allocatedSharedMemPerBlock; // Actual size of shared memory allocated
554
+ // per block
555
+ cudaOccPartitionedGCConfig partitionedGCConfig;
556
+ // Report if partitioned global caching
557
+ // is actually enabled.
558
+ };
559
+
560
+ /**
561
+ * Partitioned global caching support
562
+ *
563
+ * See cudaOccPartitionedGlobalCachingModeSupport
564
+ */
565
+ typedef enum cudaOccPartitionedGCSupport_enum {
566
+ PARTITIONED_GC_NOT_SUPPORTED, // Partitioned global caching is not supported
567
+ PARTITIONED_GC_SUPPORTED, // Partitioned global caching is supported
568
+ } cudaOccPartitionedGCSupport;
569
+
570
+ /**
571
+ * Implementation
572
+ */
573
+
574
+ /**
575
+ * Max compute capability supported
576
+ */
577
+
584
+ #define __CUDA_OCC_MAJOR__ 8
585
+ #define __CUDA_OCC_MINOR__ 6
586
+
587
+
588
+ //////////////////////////////////////////
589
+ // Mathematical Helper Functions //
590
+ //////////////////////////////////////////
591
+
592
+ static __OCC_INLINE int __occMin(int lhs, int rhs)
593
+ {
594
+ return rhs < lhs ? rhs : lhs;
595
+ }
596
+
597
+ static __OCC_INLINE int __occDivideRoundUp(int x, int y)
598
+ {
599
+ return (x + (y - 1)) / y;
600
+ }
601
+
602
+ static __OCC_INLINE int __occRoundUp(int x, int y)
603
+ {
604
+ return y * __occDivideRoundUp(x, y);
605
+ }
606
+
607
+ //////////////////////////////////////////
608
+ // Architectural Properties //
609
+ //////////////////////////////////////////
610
+
611
+ /**
612
+ * Granularity of shared memory allocation
613
+ */
614
+ static __OCC_INLINE cudaOccError cudaOccSMemAllocationGranularity(int *limit, const cudaOccDeviceProp *properties)
615
+ {
616
+ int value;
617
+
618
+ switch(properties->computeMajor) {
619
+ case 3:
620
+ case 5:
621
+ case 6:
622
+ case 7:
623
+ value = 256;
624
+ break;
625
+ case 8:
629
+ value = 128;
630
+ break;
631
+ default:
632
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
633
+ }
634
+
635
+ *limit = value;
636
+
637
+ return CUDA_OCC_SUCCESS;
638
+ }
639
+
640
+ /**
641
+ * Maximum number of registers per thread
642
+ */
643
+ static __OCC_INLINE cudaOccError cudaOccRegAllocationMaxPerThread(int *limit, const cudaOccDeviceProp *properties)
644
+ {
645
+ int value;
646
+
647
+ switch(properties->computeMajor) {
648
+ case 3:
649
+ case 5:
650
+ case 6:
651
+ value = 255;
652
+ break;
653
+ case 7:
654
+ case 8:
658
+ value = 256;
659
+ break;
660
+ default:
661
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
662
+ }
663
+
664
+ *limit = value;
665
+
666
+ return CUDA_OCC_SUCCESS;
667
+ }
668
+
669
+ /**
670
+ * Granularity of register allocation
671
+ */
672
+ static __OCC_INLINE cudaOccError cudaOccRegAllocationGranularity(int *limit, const cudaOccDeviceProp *properties)
673
+ {
674
+ int value;
675
+
676
+ switch(properties->computeMajor) {
677
+ case 3:
678
+ case 5:
679
+ case 6:
680
+ case 7:
681
+ case 8:
685
+ value = 256;
686
+ break;
687
+ default:
688
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
689
+ }
690
+
691
+ *limit = value;
692
+
693
+ return CUDA_OCC_SUCCESS;
694
+ }
695
+
696
+ /**
697
+ * Number of sub-partitions
698
+ */
699
+ static __OCC_INLINE cudaOccError cudaOccSubPartitionsPerMultiprocessor(int *limit, const cudaOccDeviceProp *properties)
700
+ {
701
+ int value;
702
+
703
+ switch(properties->computeMajor) {
704
+ case 3:
705
+ case 5:
706
+ case 7:
707
+ case 8:
711
+ value = 4;
712
+ break;
713
+ case 6:
714
+ value = properties->computeMinor ? 4 : 2;
715
+ break;
716
+ default:
717
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
718
+ }
719
+
720
+ *limit = value;
721
+
722
+ return CUDA_OCC_SUCCESS;
723
+ }
724
+
725
+
726
+ /**
727
+ * Maximum number of blocks that can run simultaneously on a multiprocessor
728
+ */
729
+ static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerMultiprocessor(int* limit, const cudaOccDeviceProp *properties)
730
+ {
731
+ int value;
732
+
733
+ switch(properties->computeMajor) {
734
+ case 3:
735
+ value = 16;
736
+ break;
737
+ case 5:
738
+ case 6:
739
+ value = 32;
740
+ break;
741
+ case 7: {
742
+ int isTuring = properties->computeMinor == 5;
743
+ value = (isTuring) ? 16 : 32;
744
+ break;
745
+ }
746
+ case 8:
747
+ if (properties->computeMinor == 0) {
748
+ value = 32;
749
+ }
756
+ else {
757
+ value = 16;
758
+ }
759
+ break;
765
+ default:
766
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
767
+ }
768
+
769
+ *limit = value;
770
+
771
+ return CUDA_OCC_SUCCESS;
772
+ }
773
+
774
+ /**
775
+ * Align up shared memory based on compute major configurations
776
+ */
777
+ static __OCC_INLINE cudaOccError cudaOccAlignUpShmemSizeVoltaPlus(size_t *shMemSize, const cudaOccDeviceProp *properties)
778
+ {
779
+ // Volta and Turing have shared L1 cache / shared memory, and support cache
780
+ // configuration to trade one for the other. These values are needed to
781
+ // map carveout config ratio to the next available architecture size
782
+ size_t size = *shMemSize;
783
+
784
+ switch (properties->computeMajor) {
785
+ case 7: {
786
+ // Turing supports 32KB and 64KB shared mem.
787
+ int isTuring = properties->computeMinor == 5;
788
+ if (isTuring) {
789
+ if (size <= 32 * 1024) {
790
+ *shMemSize = 32 * 1024;
791
+ }
792
+ else if (size <= 64 * 1024) {
793
+ *shMemSize = 64 * 1024;
794
+ }
795
+ else {
796
+ return CUDA_OCC_ERROR_INVALID_INPUT;
797
+ }
798
+ }
799
+ // Volta supports 0KB, 8KB, 16KB, 32KB, 64KB, and 96KB shared mem.
800
+ else {
801
+ if (size == 0) {
802
+ *shMemSize = 0;
803
+ }
804
+ else if (size <= 8 * 1024) {
805
+ *shMemSize = 8 * 1024;
806
+ }
807
+ else if (size <= 16 * 1024) {
808
+ *shMemSize = 16 * 1024;
809
+ }
810
+ else if (size <= 32 * 1024) {
811
+ *shMemSize = 32 * 1024;
812
+ }
813
+ else if (size <= 64 * 1024) {
814
+ *shMemSize = 64 * 1024;
815
+ }
816
+ else if (size <= 96 * 1024) {
817
+ *shMemSize = 96 * 1024;
818
+ }
819
+ else {
820
+ return CUDA_OCC_ERROR_INVALID_INPUT;
821
+ }
822
+ }
823
+ break;
824
+ }
825
+ case 8:
826
+ if (properties->computeMinor == 0 || properties->computeMinor == 7) {
827
+ if (size == 0) {
828
+ *shMemSize = 0;
829
+ }
830
+ else if (size <= 8 * 1024) {
831
+ *shMemSize = 8 * 1024;
832
+ }
833
+ else if (size <= 16 * 1024) {
834
+ *shMemSize = 16 * 1024;
835
+ }
836
+ else if (size <= 32 * 1024) {
837
+ *shMemSize = 32 * 1024;
838
+ }
839
+ else if (size <= 64 * 1024) {
840
+ *shMemSize = 64 * 1024;
841
+ }
842
+ else if (size <= 100 * 1024) {
843
+ *shMemSize = 100 * 1024;
844
+ }
845
+ else if (size <= 132 * 1024) {
846
+ *shMemSize = 132 * 1024;
847
+ }
848
+ else if (size <= 164 * 1024) {
849
+ *shMemSize = 164 * 1024;
850
+ }
851
+ else {
852
+ return CUDA_OCC_ERROR_INVALID_INPUT;
853
+ }
854
+ }
855
+ else {
856
+ if (size == 0) {
857
+ *shMemSize = 0;
858
+ }
859
+ else if (size <= 8 * 1024) {
860
+ *shMemSize = 8 * 1024;
861
+ }
862
+ else if (size <= 16 * 1024) {
863
+ *shMemSize = 16 * 1024;
864
+ }
865
+ else if (size <= 32 * 1024) {
866
+ *shMemSize = 32 * 1024;
867
+ }
868
+ else if (size <= 64 * 1024) {
869
+ *shMemSize = 64 * 1024;
870
+ }
871
+ else if (size <= 100 * 1024) {
872
+ *shMemSize = 100 * 1024;
873
+ }
874
+ else {
875
+ return CUDA_OCC_ERROR_INVALID_INPUT;
876
+ }
877
+ }
878
+ break;
879
+
917
+ default:
918
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
919
+ }
920
+
921
+ return CUDA_OCC_SUCCESS;
922
+ }
923
+
924
+ /**
925
+ * Shared memory based on the new carveoutConfig API introduced with Volta
926
+ */
927
+ static __OCC_INLINE cudaOccError cudaOccSMemPreferenceVoltaPlus(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
928
+ {
929
+ cudaOccError status = CUDA_OCC_SUCCESS;
930
+ size_t preferenceShmemSize;
931
+
932
+ // CUDA 9.0 introduces a new API to set the shared memory / L1 configuration on supported
933
+ // devices. This preference will take precedence over the older cacheConfig setting.
934
+ // Map cacheConfig to its effective preference value.
935
+ int effectivePreference = state->carveoutConfig;
936
+ if ((effectivePreference < SHAREDMEM_CARVEOUT_DEFAULT) || (effectivePreference > SHAREDMEM_CARVEOUT_MAX_SHARED)) {
937
+ return CUDA_OCC_ERROR_INVALID_INPUT;
938
+ }
939
+
940
+ if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) {
941
+ switch (state->cacheConfig)
942
+ {
943
+ case CACHE_PREFER_L1:
944
+ effectivePreference = SHAREDMEM_CARVEOUT_MAX_L1;
945
+ break;
946
+ case CACHE_PREFER_SHARED:
947
+ effectivePreference = SHAREDMEM_CARVEOUT_MAX_SHARED;
948
+ break;
949
+ case CACHE_PREFER_EQUAL:
950
+ effectivePreference = SHAREDMEM_CARVEOUT_HALF;
951
+ break;
952
+ default:
953
+ effectivePreference = SHAREDMEM_CARVEOUT_DEFAULT;
954
+ break;
955
+ }
956
+ }
957
+
958
+ if (effectivePreference == SHAREDMEM_CARVEOUT_DEFAULT) {
959
+ preferenceShmemSize = properties->sharedMemPerMultiprocessor;
960
+ }
961
+ else {
962
+ preferenceShmemSize = (size_t) (effectivePreference * properties->sharedMemPerMultiprocessor) / 100;
963
+ }
964
+
965
+ status = cudaOccAlignUpShmemSizeVoltaPlus(&preferenceShmemSize, properties);
966
+ *limit = preferenceShmemSize;
967
+ return status;
968
+ }
969
+
970
+ /**
971
+ * Shared memory based on the cacheConfig
972
+ */
973
+ static __OCC_INLINE cudaOccError cudaOccSMemPreference(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
974
+ {
975
+ size_t bytes = 0;
976
+ size_t sharedMemPerMultiprocessorHigh = properties->sharedMemPerMultiprocessor;
977
+ cudaOccCacheConfig cacheConfig = state->cacheConfig;
978
+
979
+ // Kepler has shared L1 cache / shared memory, and supports cache
980
+ // configuration to trade one for the other. These values are needed to
981
+ // calculate the correct shared memory size for user requested cache
982
+ // configuration.
983
+ //
984
+ size_t minCacheSize = 16384;
985
+ size_t maxCacheSize = 49152;
986
+ size_t cacheAndSharedTotal = sharedMemPerMultiprocessorHigh + minCacheSize;
987
+ size_t sharedMemPerMultiprocessorLow = cacheAndSharedTotal - maxCacheSize;
988
+
989
+ switch (properties->computeMajor) {
990
+ case 3:
991
+ // Kepler supports 16KB, 32KB, or 48KB partitions for L1. The rest
992
+ // is shared memory.
993
+ //
994
+ switch (cacheConfig) {
995
+ default :
996
+ case CACHE_PREFER_NONE:
997
+ case CACHE_PREFER_SHARED:
998
+ bytes = sharedMemPerMultiprocessorHigh;
999
+ break;
1000
+ case CACHE_PREFER_L1:
1001
+ bytes = sharedMemPerMultiprocessorLow;
1002
+ break;
1003
+ case CACHE_PREFER_EQUAL:
1004
+ // Equal is the mid-point between high and low. It should be
1005
+ // equivalent to low + 16KB.
1006
+ //
1007
+ bytes = (sharedMemPerMultiprocessorHigh + sharedMemPerMultiprocessorLow) / 2;
1008
+ break;
1009
+ }
1010
+ break;
1011
+ case 5:
1012
+ case 6:
1013
+ // Maxwell and Pascal have dedicated shared memory.
1014
+ //
1015
+ bytes = sharedMemPerMultiprocessorHigh;
1016
+ break;
1017
+ default:
1018
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
1019
+ }
1020
+
1021
+ *limit = bytes;
1022
+
1023
+ return CUDA_OCC_SUCCESS;
1024
+ }
1025
+
1026
+ /**
1027
+ * Shared memory based on config requested by User
1028
+ */
1029
+ static __OCC_INLINE cudaOccError cudaOccSMemPerMultiprocessor(size_t *limit, const cudaOccDeviceProp *properties, const cudaOccDeviceState *state)
1030
+ {
1031
+ // Volta introduces a new API that allows for shared memory carveout preference. Because it is a shared memory preference,
1032
+ // it is handled separately from the cache config preference.
1033
+ if (properties->computeMajor >= 7) {
1034
+ return cudaOccSMemPreferenceVoltaPlus(limit, properties, state);
1035
+ }
1036
+ return cudaOccSMemPreference(limit, properties, state);
1037
+ }
1038
+
1039
+ /**
1040
+ * Return the per block shared memory limit based on function config
1041
+ */
1042
+ static __OCC_INLINE cudaOccError cudaOccSMemPerBlock(size_t *limit, const cudaOccDeviceProp *properties, cudaOccFuncShmemConfig shmemLimitConfig, size_t smemPerCta)
1043
+ {
1044
+ switch (properties->computeMajor) {
1045
+ case 2:
1046
+ case 3:
1047
+ case 4:
1048
+ case 5:
1049
+ case 6:
1050
+ *limit = properties->sharedMemPerBlock;
1051
+ break;
1052
+ case 7:
1053
+ case 8:
1057
+ switch (shmemLimitConfig) {
1058
+ default:
1059
+ case FUNC_SHMEM_LIMIT_DEFAULT:
1060
+ *limit = properties->sharedMemPerBlock;
1061
+ break;
1062
+ case FUNC_SHMEM_LIMIT_OPTIN:
1063
+ if (smemPerCta > properties->sharedMemPerBlock) {
1064
+ *limit = properties->sharedMemPerBlockOptin;
1065
+ }
1066
+ else {
1067
+ *limit = properties->sharedMemPerBlock;
1068
+ }
1069
+ break;
1070
+ }
1071
+ break;
1072
+ default:
1073
+ return CUDA_OCC_ERROR_UNKNOWN_DEVICE;
1074
+ }
1075
+
1076
+ // Starting Ampere, CUDA driver reserves additional shared memory per block
1077
+ if (properties->computeMajor >= 8) {
1078
+ *limit += properties->reservedSharedMemPerBlock;
1079
+ }
1080
+
1081
+ return CUDA_OCC_SUCCESS;
1082
+ }
1083
+
1084
+ /**
1085
+ * Partitioned global caching mode support
1086
+ */
1087
+ static __OCC_INLINE cudaOccError cudaOccPartitionedGlobalCachingModeSupport(cudaOccPartitionedGCSupport *limit, const cudaOccDeviceProp *properties)
1088
+ {
1089
+ *limit = PARTITIONED_GC_NOT_SUPPORTED;
1090
+
1091
+ if ((properties->computeMajor == 5 && (properties->computeMinor == 2 || properties->computeMinor == 3)) ||
1092
+ properties->computeMajor == 6) {
1093
+ *limit = PARTITIONED_GC_SUPPORTED;
1094
+ }
1095
+
1096
+ if (properties->computeMajor == 6 && properties->computeMinor == 0) {
1097
+ *limit = PARTITIONED_GC_NOT_SUPPORTED;
1098
+ }
1099
+
1100
+ return CUDA_OCC_SUCCESS;
1101
+ }
1102
+
1103
+ ///////////////////////////////////////////////
1104
+ // User Input Sanity //
1105
+ ///////////////////////////////////////////////
1106
+
1107
+ static __OCC_INLINE cudaOccError cudaOccDevicePropCheck(const cudaOccDeviceProp *properties)
1108
+ {
1109
+ // Verify device properties
1110
+ //
1111
+ // Each of these limits must be a positive number.
1112
+ //
1113
+ // Compute capability is checked during the occupancy calculation
1114
+ //
1115
+ if (properties->maxThreadsPerBlock <= 0 ||
1116
+ properties->maxThreadsPerMultiprocessor <= 0 ||
1117
+ properties->regsPerBlock <= 0 ||
1118
+ properties->regsPerMultiprocessor <= 0 ||
1119
+ properties->warpSize <= 0 ||
1120
+ properties->sharedMemPerBlock <= 0 ||
1121
+ properties->sharedMemPerMultiprocessor <= 0 ||
1122
+ properties->numSms <= 0) {
1123
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1124
+ }
1125
+
1126
+ return CUDA_OCC_SUCCESS;
1127
+ }
1128
+
1129
+ static __OCC_INLINE cudaOccError cudaOccFuncAttributesCheck(const cudaOccFuncAttributes *attributes)
1130
+ {
1131
+ // Verify function attributes
1132
+ //
1133
+ if (attributes->maxThreadsPerBlock <= 0 ||
1134
+ attributes->numRegs < 0) { // Compiler may choose not to use
1135
+ // any register (empty kernels,
1136
+ // etc.)
1137
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1138
+ }
1139
+
1140
+ return CUDA_OCC_SUCCESS;
1141
+ }
1142
+
1143
+ static __OCC_INLINE cudaOccError cudaOccDeviceStateCheck(const cudaOccDeviceState *state)
1144
+ {
1145
+ (void)state; // silence unused-variable warning
1146
+ // Placeholder
1147
+ //
1148
+
1149
+ return CUDA_OCC_SUCCESS;
1150
+ }
1151
+
1152
+ static __OCC_INLINE cudaOccError cudaOccInputCheck(
1153
+ const cudaOccDeviceProp *properties,
1154
+ const cudaOccFuncAttributes *attributes,
1155
+ const cudaOccDeviceState *state)
1156
+ {
1157
+ cudaOccError status = CUDA_OCC_SUCCESS;
1158
+
1159
+ status = cudaOccDevicePropCheck(properties);
1160
+ if (status != CUDA_OCC_SUCCESS) {
1161
+ return status;
1162
+ }
1163
+
1164
+ status = cudaOccFuncAttributesCheck(attributes);
1165
+ if (status != CUDA_OCC_SUCCESS) {
1166
+ return status;
1167
+ }
1168
+
1169
+ status = cudaOccDeviceStateCheck(state);
1170
+ if (status != CUDA_OCC_SUCCESS) {
1171
+ return status;
1172
+ }
1173
+
1174
+ return status;
1175
+ }
1176
+
1177
+ ///////////////////////////////////////////////
1178
+ // Occupancy calculation Functions //
1179
+ ///////////////////////////////////////////////
1180
+
1181
+ static __OCC_INLINE cudaOccPartitionedGCConfig cudaOccPartitionedGCExpected(
1182
+ const cudaOccDeviceProp *properties,
1183
+ const cudaOccFuncAttributes *attributes)
1184
+ {
1185
+ cudaOccPartitionedGCSupport gcSupport;
1186
+ cudaOccPartitionedGCConfig gcConfig;
1187
+
1188
+ cudaOccPartitionedGlobalCachingModeSupport(&gcSupport, properties);
1189
+
1190
+ gcConfig = attributes->partitionedGCConfig;
1191
+
1192
+ if (gcSupport == PARTITIONED_GC_NOT_SUPPORTED) {
1193
+ gcConfig = PARTITIONED_GC_OFF;
1194
+ }
1195
+
1196
+ return gcConfig;
1197
+ }
1198
+
1199
+ // Warp limit
1200
+ //
1201
+ static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMWarpsLimit(
1202
+ int *limit,
1203
+ cudaOccPartitionedGCConfig gcConfig,
1204
+ const cudaOccDeviceProp *properties,
1205
+ const cudaOccFuncAttributes *attributes,
1206
+ int blockSize)
1207
+ {
1208
+ cudaOccError status = CUDA_OCC_SUCCESS;
1209
+ int maxWarpsPerSm;
1210
+ int warpsAllocatedPerCTA;
1211
+ int maxBlocks;
1212
+ (void)attributes; // silence unused-variable warning
1213
+
1214
+ if (blockSize > properties->maxThreadsPerBlock) {
1215
+ maxBlocks = 0;
1216
+ }
1217
+ else {
1218
+ maxWarpsPerSm = properties->maxThreadsPerMultiprocessor / properties->warpSize;
1219
+ warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize);
1220
+ maxBlocks = 0;
1221
+
1222
+ if (gcConfig != PARTITIONED_GC_OFF) {
1223
+ int maxBlocksPerSmPartition;
1224
+ int maxWarpsPerSmPartition;
1225
+
1226
+ // If partitioned global caching is on, then a CTA can only use an SM
1227
+ // partition (a half SM), and thus a half of the warp slots
1228
+ // available per SM
1229
+ //
1230
+ maxWarpsPerSmPartition = maxWarpsPerSm / 2;
1231
+ maxBlocksPerSmPartition = maxWarpsPerSmPartition / warpsAllocatedPerCTA;
1232
+ maxBlocks = maxBlocksPerSmPartition * 2;
1233
+ }
1234
+ // On hardware that supports partitioned global caching, each half SM is
1235
+ // guaranteed to support at least 32 warps (maximum number of warps of a
1236
+ // CTA), so caching will not cause 0 occupancy due to insufficient warp
1237
+ // allocation slots.
1238
+ //
1239
+ else {
1240
+ maxBlocks = maxWarpsPerSm / warpsAllocatedPerCTA;
1241
+ }
1242
+ }
1243
+
1244
+ *limit = maxBlocks;
1245
+
1246
+ return status;
1247
+ }
1248
+
1249
+ // Shared memory limit
1250
+ //
1251
+ static __OCC_INLINE cudaOccError cudaOccMaxBlocksPerSMSmemLimit(
1252
+ int *limit,
1253
+ cudaOccResult *result,
1254
+ const cudaOccDeviceProp *properties,
1255
+ const cudaOccFuncAttributes *attributes,
1256
+ const cudaOccDeviceState *state,
1257
+ int blockSize,
1258
+ size_t dynamicSmemSize)
1259
+ {
1260
+ cudaOccError status = CUDA_OCC_SUCCESS;
1261
+ int allocationGranularity;
1262
+ size_t userSmemPreference = 0;
1263
+ size_t totalSmemUsagePerCTA;
1264
+ size_t maxSmemUsagePerCTA;
1265
+ size_t smemAllocatedPerCTA;
1266
+ size_t staticSmemSize;
1267
+ size_t sharedMemPerMultiprocessor;
1268
+ size_t smemLimitPerCTA;
1269
+ int maxBlocks;
1270
+ int dynamicSmemSizeExceeded = 0;
1271
+ int totalSmemSizeExceeded = 0;
1272
+ (void)blockSize; // silence unused-variable warning
1273
+
1274
+ status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties);
1275
+ if (status != CUDA_OCC_SUCCESS) {
1276
+ return status;
1277
+ }
1278
+
1279
+ // Obtain the user preferred shared memory size. This setting is ignored if
1280
+ // user requests more shared memory than preferred.
1281
+ //
1282
+ status = cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state);
1283
+ if (status != CUDA_OCC_SUCCESS) {
1284
+ return status;
1285
+ }
1286
+
1287
+ staticSmemSize = attributes->sharedSizeBytes + properties->reservedSharedMemPerBlock;
1288
+ totalSmemUsagePerCTA = staticSmemSize + dynamicSmemSize;
1289
+ smemAllocatedPerCTA = __occRoundUp((int)totalSmemUsagePerCTA, (int)allocationGranularity);
1290
+
1291
+ maxSmemUsagePerCTA = staticSmemSize + attributes->maxDynamicSharedSizeBytes;
1292
+
1293
+ dynamicSmemSizeExceeded = 0;
1294
+ totalSmemSizeExceeded = 0;
1295
+
1296
+ // Obtain the user set maximum dynamic size if it exists
1297
+ // If so, the current launch dynamic shared memory must not
1298
+ // exceed the set limit
1299
+ if (attributes->shmemLimitConfig != FUNC_SHMEM_LIMIT_DEFAULT &&
1300
+ dynamicSmemSize > attributes->maxDynamicSharedSizeBytes) {
1301
+ dynamicSmemSizeExceeded = 1;
1302
+ }
1303
+
1304
+ status = cudaOccSMemPerBlock(&smemLimitPerCTA, properties, attributes->shmemLimitConfig, maxSmemUsagePerCTA);
1305
+ if (status != CUDA_OCC_SUCCESS) {
1306
+ return status;
1307
+ }
1308
+
1309
+ if (smemAllocatedPerCTA > smemLimitPerCTA) {
1310
+ totalSmemSizeExceeded = 1;
1311
+ }
1312
+
1313
+ if (dynamicSmemSizeExceeded || totalSmemSizeExceeded) {
1314
+ maxBlocks = 0;
1315
+ }
1316
+ else {
1317
+ // The user-requested shared memory limit is used as long as it is greater
1318
+ // than the total shared memory used per CTA, i.e. as long as at least
1319
+ // one CTA can be launched.
1320
+ if (userSmemPreference >= smemAllocatedPerCTA) {
1321
+ sharedMemPerMultiprocessor = userSmemPreference;
1322
+ }
1323
+ else {
1324
+ // On Volta+, user requested shared memory will limit occupancy
1325
+ // if it's less than shared memory per CTA. Otherwise, the
1326
+ // maximum shared memory limit is used.
1327
+ if (properties->computeMajor >= 7) {
1328
+ sharedMemPerMultiprocessor = smemAllocatedPerCTA;
1329
+ status = cudaOccAlignUpShmemSizeVoltaPlus(&sharedMemPerMultiprocessor, properties);
1330
+ if (status != CUDA_OCC_SUCCESS) {
1331
+ return status;
1332
+ }
1333
+ }
1334
+ else {
1335
+ sharedMemPerMultiprocessor = properties->sharedMemPerMultiprocessor;
1336
+ }
1337
+ }
1338
+
1339
+ if (smemAllocatedPerCTA > 0) {
1340
+ maxBlocks = (int)(sharedMemPerMultiprocessor / smemAllocatedPerCTA);
1341
+ }
1342
+ else {
1343
+ maxBlocks = INT_MAX;
1344
+ }
1345
+ }
1346
+
1347
+ result->allocatedSharedMemPerBlock = smemAllocatedPerCTA;
1348
+
1349
+ *limit = maxBlocks;
1350
+
1351
+ return status;
1352
+ }
1353
+
1354
+ static __OCC_INLINE
1355
+ cudaOccError cudaOccMaxBlocksPerSMRegsLimit(
1356
+ int *limit,
1357
+ cudaOccPartitionedGCConfig *gcConfig,
1358
+ cudaOccResult *result,
1359
+ const cudaOccDeviceProp *properties,
1360
+ const cudaOccFuncAttributes *attributes,
1361
+ int blockSize)
1362
+ {
1363
+ cudaOccError status = CUDA_OCC_SUCCESS;
1364
+ int allocationGranularity;
1365
+ int warpsAllocatedPerCTA;
1366
+ int regsAllocatedPerCTA;
1367
+ int regsAssumedPerCTA;
1368
+ int regsPerWarp;
1369
+ int regsAllocatedPerWarp;
1370
+ int numSubPartitions;
1371
+ int numRegsPerSubPartition;
1372
+ int numWarpsPerSubPartition;
1373
+ int numWarpsPerSM;
1374
+ int maxBlocks;
1375
+ int maxRegsPerThread;
1376
+
1377
+ status = cudaOccRegAllocationGranularity(
1378
+ &allocationGranularity,
1379
+ properties);
1380
+ if (status != CUDA_OCC_SUCCESS) {
1381
+ return status;
1382
+ }
1383
+
1384
+ status = cudaOccRegAllocationMaxPerThread(
1385
+ &maxRegsPerThread,
1386
+ properties);
1387
+ if (status != CUDA_OCC_SUCCESS) {
1388
+ return status;
1389
+ }
1390
+
1391
+ status = cudaOccSubPartitionsPerMultiprocessor(&numSubPartitions, properties);
1392
+ if (status != CUDA_OCC_SUCCESS) {
1393
+ return status;
1394
+ }
1395
+
1396
+ warpsAllocatedPerCTA = __occDivideRoundUp(blockSize, properties->warpSize);
1397
+
1398
+ // GPUs of compute capability 2.x and higher allocate registers to warps
1399
+ //
1400
+ // Number of regs per warp is regs per thread x warp size, rounded up to
1401
+ // register allocation granularity
1402
+ //
1403
+ regsPerWarp = attributes->numRegs * properties->warpSize;
1404
+ regsAllocatedPerWarp = __occRoundUp(regsPerWarp, allocationGranularity);
1405
+ regsAllocatedPerCTA = regsAllocatedPerWarp * warpsAllocatedPerCTA;
1406
+
1407
+ // Hardware verifies if a launch fits the per-CTA register limit. For
1408
+ // historical reasons, the verification logic assumes register
1409
+ // allocations are made to all partitions simultaneously. Therefore, to
1410
+ // simulate the hardware check, the warp allocation needs to be rounded
1411
+ // up to the number of partitions.
1412
+ //
1413
+ regsAssumedPerCTA = regsAllocatedPerWarp * __occRoundUp(warpsAllocatedPerCTA, numSubPartitions);
1414
+
1415
+ if (properties->regsPerBlock < regsAssumedPerCTA || // Hardware check
1416
+ properties->regsPerBlock < regsAllocatedPerCTA || // Software check
1417
+ attributes->numRegs > maxRegsPerThread) { // Per thread limit check
1418
+ maxBlocks = 0;
1419
+ }
1420
+ else {
1421
+ if (regsAllocatedPerWarp > 0) {
1422
+ // Registers are allocated in each sub-partition. The max number
1423
+ // of warps that can fit on an SM is equal to the max number of
1424
+ // warps per sub-partition x number of sub-partitions.
1425
+ //
1426
+ numRegsPerSubPartition = properties->regsPerMultiprocessor / numSubPartitions;
1427
+ numWarpsPerSubPartition = numRegsPerSubPartition / regsAllocatedPerWarp;
1428
+
1429
+ maxBlocks = 0;
1430
+
1431
+ if (*gcConfig != PARTITIONED_GC_OFF) {
1432
+ int numSubPartitionsPerSmPartition;
1433
+ int numWarpsPerSmPartition;
1434
+ int maxBlocksPerSmPartition;
1435
+
1436
+ // If partitioned global caching is on, then a CTA can only
1437
+ // use a half SM, and thus a half of the registers available
1438
+ // per SM
1439
+ //
1440
+ numSubPartitionsPerSmPartition = numSubPartitions / 2;
1441
+ numWarpsPerSmPartition = numWarpsPerSubPartition * numSubPartitionsPerSmPartition;
1442
+ maxBlocksPerSmPartition = numWarpsPerSmPartition / warpsAllocatedPerCTA;
1443
+ maxBlocks = maxBlocksPerSmPartition * 2;
1444
+ }
1445
+
1446
+ // Try again if partitioned global caching is not enabled, or if
1447
+ // the CTA cannot fit on the SM with caching on (maxBlocks == 0). In the latter
1448
+ // case, the device will automatically turn off caching, except
1449
+ // if the user forces enablement via PARTITIONED_GC_ON_STRICT to calculate
1450
+ // occupancy and launch configuration.
1451
+ //
1452
+ if (maxBlocks == 0 && *gcConfig != PARTITIONED_GC_ON_STRICT) {
1453
+ // In case *gcConfig was PARTITIONED_GC_ON flip it OFF since
1454
+ // this is what it will be if we spread CTA across partitions.
1455
+ //
1456
+ *gcConfig = PARTITIONED_GC_OFF;
1457
+ numWarpsPerSM = numWarpsPerSubPartition * numSubPartitions;
1458
+ maxBlocks = numWarpsPerSM / warpsAllocatedPerCTA;
1459
+ }
1460
+ }
1461
+ else {
1462
+ maxBlocks = INT_MAX;
1463
+ }
1464
+ }
1465
+
1466
+
1467
+ result->allocatedRegistersPerBlock = regsAllocatedPerCTA;
1468
+
1469
+ *limit = maxBlocks;
1470
+
1471
+ return status;
1472
+ }
1473
+
1474
+ ///////////////////////////////////
1475
+ // API Implementations //
1476
+ ///////////////////////////////////
1477
+
1478
+ static __OCC_INLINE
1479
+ cudaOccError cudaOccMaxActiveBlocksPerMultiprocessor(
1480
+ cudaOccResult *result,
1481
+ const cudaOccDeviceProp *properties,
1482
+ const cudaOccFuncAttributes *attributes,
1483
+ const cudaOccDeviceState *state,
1484
+ int blockSize,
1485
+ size_t dynamicSmemSize)
1486
+ {
1487
+ cudaOccError status = CUDA_OCC_SUCCESS;
1488
+ int ctaLimitWarps = 0;
1489
+ int ctaLimitBlocks = 0;
1490
+ int ctaLimitSMem = 0;
1491
+ int ctaLimitRegs = 0;
1492
+ int ctaLimit = 0;
1493
+ unsigned int limitingFactors = 0;
1494
+
1495
+ cudaOccPartitionedGCConfig gcConfig = PARTITIONED_GC_OFF;
1496
+
1497
+ if (!result || !properties || !attributes || !state || blockSize <= 0) {
1498
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1499
+ }
1500
+
1501
+ ///////////////////////////
1502
+ // Check user input
1503
+ ///////////////////////////
1504
+
1505
+ status = cudaOccInputCheck(properties, attributes, state);
1506
+ if (status != CUDA_OCC_SUCCESS) {
1507
+ return status;
1508
+ }
1509
+
1510
+ ///////////////////////////
1511
+ // Initialization
1512
+ ///////////////////////////
1513
+
1514
+ gcConfig = cudaOccPartitionedGCExpected(properties, attributes);
1515
+
1516
+ ///////////////////////////
1517
+ // Compute occupancy
1518
+ ///////////////////////////
1519
+
1520
+ // Limits due to registers/SM
1521
+ // Also compute if partitioned global caching has to be turned off
1522
+ //
1523
+ status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegs, &gcConfig, result, properties, attributes, blockSize);
1524
+ if (status != CUDA_OCC_SUCCESS) {
1525
+ return status;
1526
+ }
1527
+
1528
+ // SMs on GP100 (6.0) have 2 subpartitions, while those on GP10x have 4.
1529
+ // As a result, an SM on GP100 may be able to run more CTAs than the one on GP10x.
1530
+ // For forward compatibility within the Pascal family, if a function cannot run on GP10x (maxBlocks == 0),
1531
+ // we do not let it run on any Pascal processor, even though it may be able to run on GP100.
1532
+ // Therefore, we check the occupancy on GP10x when it can run on GP100
1533
+ //
1534
+ if (properties->computeMajor == 6 && properties->computeMinor == 0 && ctaLimitRegs) {
1535
+ cudaOccDeviceProp propertiesGP10x;
1536
+ cudaOccPartitionedGCConfig gcConfigGP10x = gcConfig;
1537
+ int ctaLimitRegsGP10x = 0;
1538
+
1539
+ // Set up properties for GP10x
1540
+ memcpy(&propertiesGP10x, properties, sizeof(propertiesGP10x));
1541
+ propertiesGP10x.computeMinor = 1;
1542
+
1543
+ status = cudaOccMaxBlocksPerSMRegsLimit(&ctaLimitRegsGP10x, &gcConfigGP10x, result, &propertiesGP10x, attributes, blockSize);
1544
+ if (status != CUDA_OCC_SUCCESS) {
1545
+ return status;
1546
+ }
1547
+
1548
+ if (ctaLimitRegsGP10x == 0) {
1549
+ ctaLimitRegs = 0;
1550
+ }
1551
+ }
1552
+
1553
+ // Limits due to warps/SM
1554
+ //
1555
+ status = cudaOccMaxBlocksPerSMWarpsLimit(&ctaLimitWarps, gcConfig, properties, attributes, blockSize);
1556
+ if (status != CUDA_OCC_SUCCESS) {
1557
+ return status;
1558
+ }
1559
+
1560
+ // Limits due to blocks/SM
1561
+ //
1562
+ status = cudaOccMaxBlocksPerMultiprocessor(&ctaLimitBlocks, properties);
1563
+ if (status != CUDA_OCC_SUCCESS) {
1564
+ return status;
1565
+ }
1566
+
1567
+ // Limits due to shared memory/SM
1568
+ //
1569
+ status = cudaOccMaxBlocksPerSMSmemLimit(&ctaLimitSMem, result, properties, attributes, state, blockSize, dynamicSmemSize);
1570
+ if (status != CUDA_OCC_SUCCESS) {
1571
+ return status;
1572
+ }
1573
+
1574
+ ///////////////////////////
1575
+ // Overall occupancy
1576
+ ///////////////////////////
1577
+
1578
+ // Overall limit is min() of limits due to above reasons
1579
+ //
1580
+ ctaLimit = __occMin(ctaLimitRegs, __occMin(ctaLimitSMem, __occMin(ctaLimitWarps, ctaLimitBlocks)));
1581
+
1582
+ // Fill in the return values
1583
+ //
1584
+ // Determine occupancy limiting factors
1585
+ //
1586
+ if (ctaLimit == ctaLimitWarps) {
1587
+ limitingFactors |= OCC_LIMIT_WARPS;
1588
+ }
1589
+ if (ctaLimit == ctaLimitRegs) {
1590
+ limitingFactors |= OCC_LIMIT_REGISTERS;
1591
+ }
1592
+ if (ctaLimit == ctaLimitSMem) {
1593
+ limitingFactors |= OCC_LIMIT_SHARED_MEMORY;
1594
+ }
1595
+ if (ctaLimit == ctaLimitBlocks) {
1596
+ limitingFactors |= OCC_LIMIT_BLOCKS;
1597
+ }
1598
+ result->limitingFactors = limitingFactors;
1599
+
1600
+ result->blockLimitRegs = ctaLimitRegs;
1601
+ result->blockLimitSharedMem = ctaLimitSMem;
1602
+ result->blockLimitWarps = ctaLimitWarps;
1603
+ result->blockLimitBlocks = ctaLimitBlocks;
1604
+ result->partitionedGCConfig = gcConfig;
1605
+
1606
+ // Final occupancy
1607
+ result->activeBlocksPerMultiprocessor = ctaLimit;
1608
+
1609
+ return CUDA_OCC_SUCCESS;
1610
+ }
1611
+
1612
+ static __OCC_INLINE
1613
+ cudaOccError cudaOccAvailableDynamicSMemPerBlock(
1614
+ size_t *bytesAvailable,
1615
+ const cudaOccDeviceProp *properties,
1616
+ const cudaOccFuncAttributes *attributes,
1617
+ const cudaOccDeviceState *state,
1618
+ int numBlocks,
1619
+ int blockSize)
1620
+ {
1621
+ int allocationGranularity;
1622
+ size_t smemLimitPerBlock;
1623
+ size_t smemAvailableForDynamic;
1624
+ size_t userSmemPreference = 0;
1625
+ size_t sharedMemPerMultiprocessor;
1626
+ cudaOccResult result;
1627
+ cudaOccError status = CUDA_OCC_SUCCESS;
1628
+
1629
+ if (numBlocks <= 0)
1630
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1631
+
1632
+ // First compute occupancy of potential kernel launch.
1633
+ //
1634
+ status = cudaOccMaxActiveBlocksPerMultiprocessor(&result, properties, attributes, state, blockSize, 0);
1635
+ if (status != CUDA_OCC_SUCCESS) {
1636
+ return status;
1637
+ }
1638
+ // Check if occupancy is achievable given user requested number of blocks.
1639
+ //
1640
+ if (result.activeBlocksPerMultiprocessor < numBlocks) {
1641
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1642
+ }
1643
+
1644
+ status = cudaOccSMemAllocationGranularity(&allocationGranularity, properties);
1645
+ if (status != CUDA_OCC_SUCCESS) {
1646
+ return status;
1647
+ }
1648
+
1649
+ // Return the per block shared memory limit based on function config.
1650
+ //
1651
+ status = cudaOccSMemPerBlock(&smemLimitPerBlock, properties, attributes->shmemLimitConfig, properties->sharedMemPerMultiprocessor);
1652
+ if (status != CUDA_OCC_SUCCESS) {
1653
+ return status;
1654
+ }
1655
+
1656
+ // If only a single block is needed per SM, the user preference can be ignored and the full SW
1657
+ // limit can be used as shared memory. Otherwise, if more than one block is needed, the user
1658
+ // preference sets the total limit of available shared memory.
1659
+ //
1660
+ cudaOccSMemPerMultiprocessor(&userSmemPreference, properties, state);
1661
+ if (numBlocks == 1) {
1662
+ sharedMemPerMultiprocessor = smemLimitPerBlock;
1663
+ }
1664
+ else {
1665
+ if (!userSmemPreference) {
1666
+ userSmemPreference = 1;
1667
+ status = cudaOccAlignUpShmemSizeVoltaPlus(&userSmemPreference, properties);
1668
+ if (status != CUDA_OCC_SUCCESS) {
1669
+ return status;
1670
+ }
1671
+ }
1672
+ sharedMemPerMultiprocessor = userSmemPreference;
1673
+ }
1674
+
1675
+ // Compute total shared memory available per SM
1676
+ //
1677
+ smemAvailableForDynamic = sharedMemPerMultiprocessor / numBlocks;
1678
+ smemAvailableForDynamic = (smemAvailableForDynamic / allocationGranularity) * allocationGranularity;
1679
+
1680
+ // Cap shared memory
1681
+ //
1682
+ if (smemAvailableForDynamic > smemLimitPerBlock) {
1683
+ smemAvailableForDynamic = smemLimitPerBlock;
1684
+ }
1685
+
1686
+ // Now compute dynamic shared memory size
1687
+ smemAvailableForDynamic = smemAvailableForDynamic - attributes->sharedSizeBytes;
1688
+
1689
+ // Cap computed dynamic SM by user requested limit specified via cuFuncSetAttribute()
1690
+ //
1691
+ if (smemAvailableForDynamic > attributes->maxDynamicSharedSizeBytes)
1692
+ smemAvailableForDynamic = attributes->maxDynamicSharedSizeBytes;
1693
+
1694
+ *bytesAvailable = smemAvailableForDynamic;
1695
+ return CUDA_OCC_SUCCESS;
1696
+ }
1697
+
1698
+ static __OCC_INLINE
1699
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
1700
+ int *minGridSize,
1701
+ int *blockSize,
1702
+ const cudaOccDeviceProp *properties,
1703
+ const cudaOccFuncAttributes *attributes,
1704
+ const cudaOccDeviceState *state,
1705
+ size_t (*blockSizeToDynamicSMemSize)(int),
1706
+ size_t dynamicSMemSize)
1707
+ {
1708
+ cudaOccError status = CUDA_OCC_SUCCESS;
1709
+ cudaOccResult result;
1710
+
1711
+ // Limits
1712
+ int occupancyLimit;
1713
+ int granularity;
1714
+ int blockSizeLimit;
1715
+
1716
+ // Recorded maximum
1717
+ int maxBlockSize = 0;
1718
+ int numBlocks = 0;
1719
+ int maxOccupancy = 0;
1720
+
1721
+ // Temporary
1722
+ int blockSizeToTryAligned;
1723
+ int blockSizeToTry;
1724
+ int blockSizeLimitAligned;
1725
+ int occupancyInBlocks;
1726
+ int occupancyInThreads;
1727
+
1728
+ ///////////////////////////
1729
+ // Check user input
1730
+ ///////////////////////////
1731
+
1732
+ if (!minGridSize || !blockSize || !properties || !attributes || !state) {
1733
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1734
+ }
1735
+
1736
+ status = cudaOccInputCheck(properties, attributes, state);
1737
+ if (status != CUDA_OCC_SUCCESS) {
1738
+ return status;
1739
+ }
1740
+
1741
+ /////////////////////////////////////////////////////////////////////////////////
1742
+ // Try each block size, and pick the block size with maximum occupancy
1743
+ /////////////////////////////////////////////////////////////////////////////////
1744
+
1745
+ occupancyLimit = properties->maxThreadsPerMultiprocessor;
1746
+ granularity = properties->warpSize;
1747
+
1748
+ blockSizeLimit = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock);
1749
+ blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity);
1750
+
1751
+ for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) {
1752
+ blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned);
1753
+
1754
+ // Ignore dynamicSMemSize if the user provides a mapping
1755
+ //
1756
+ if (blockSizeToDynamicSMemSize) {
1757
+ dynamicSMemSize = (*blockSizeToDynamicSMemSize)(blockSizeToTry);
1758
+ }
1759
+
1760
+ status = cudaOccMaxActiveBlocksPerMultiprocessor(
1761
+ &result,
1762
+ properties,
1763
+ attributes,
1764
+ state,
1765
+ blockSizeToTry,
1766
+ dynamicSMemSize);
1767
+
1768
+ if (status != CUDA_OCC_SUCCESS) {
1769
+ return status;
1770
+ }
1771
+
1772
+ occupancyInBlocks = result.activeBlocksPerMultiprocessor;
1773
+ occupancyInThreads = blockSizeToTry * occupancyInBlocks;
1774
+
1775
+ if (occupancyInThreads > maxOccupancy) {
1776
+ maxBlockSize = blockSizeToTry;
1777
+ numBlocks = occupancyInBlocks;
1778
+ maxOccupancy = occupancyInThreads;
1779
+ }
1780
+
1781
+ // Early out if we have reached the maximum
1782
+ //
1783
+ if (occupancyLimit == maxOccupancy) {
1784
+ break;
1785
+ }
1786
+ }
1787
+
1788
+ ///////////////////////////
1789
+ // Return best available
1790
+ ///////////////////////////
1791
+
1792
+ // Suggested min grid size to achieve a full machine launch
1793
+ //
1794
+ *minGridSize = numBlocks * properties->numSms;
1795
+ *blockSize = maxBlockSize;
1796
+
1797
+ return status;
1798
+ }
1799
+
1800
+
1801
+ #if defined(__cplusplus)
1802
+
1803
+ namespace {
1804
+
1805
+ __OCC_INLINE
1806
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSize(
1807
+ int *minGridSize,
1808
+ int *blockSize,
1809
+ const cudaOccDeviceProp *properties,
1810
+ const cudaOccFuncAttributes *attributes,
1811
+ const cudaOccDeviceState *state,
1812
+ size_t dynamicSMemSize)
1813
+ {
1814
+ return cudaOccMaxPotentialOccupancyBlockSize(
1815
+ minGridSize,
1816
+ blockSize,
1817
+ properties,
1818
+ attributes,
1819
+ state,
1820
+ NULL,
1821
+ dynamicSMemSize);
1822
+ }
1823
+
1824
+ template <typename UnaryFunction>
1825
+ __OCC_INLINE
1826
+ cudaOccError cudaOccMaxPotentialOccupancyBlockSizeVariableSMem(
1827
+ int *minGridSize,
1828
+ int *blockSize,
1829
+ const cudaOccDeviceProp *properties,
1830
+ const cudaOccFuncAttributes *attributes,
1831
+ const cudaOccDeviceState *state,
1832
+ UnaryFunction blockSizeToDynamicSMemSize)
1833
+ {
1834
+ cudaOccError status = CUDA_OCC_SUCCESS;
1835
+ cudaOccResult result;
1836
+
1837
+ // Limits
1838
+ int occupancyLimit;
1839
+ int granularity;
1840
+ int blockSizeLimit;
1841
+
1842
+ // Recorded maximum
1843
+ int maxBlockSize = 0;
1844
+ int numBlocks = 0;
1845
+ int maxOccupancy = 0;
1846
+
1847
+ // Temporary
1848
+ int blockSizeToTryAligned;
1849
+ int blockSizeToTry;
1850
+ int blockSizeLimitAligned;
1851
+ int occupancyInBlocks;
1852
+ int occupancyInThreads;
1853
+ size_t dynamicSMemSize;
1854
+
1855
+ ///////////////////////////
1856
+ // Check user input
1857
+ ///////////////////////////
1858
+
1859
+ if (!minGridSize || !blockSize || !properties || !attributes || !state) {
1860
+ return CUDA_OCC_ERROR_INVALID_INPUT;
1861
+ }
1862
+
1863
+ status = cudaOccInputCheck(properties, attributes, state);
1864
+ if (status != CUDA_OCC_SUCCESS) {
1865
+ return status;
1866
+ }
1867
+
1868
+ /////////////////////////////////////////////////////////////////////////////////
1869
+ // Try each block size, and pick the block size with maximum occupancy
1870
+ /////////////////////////////////////////////////////////////////////////////////
1871
+
1872
+ occupancyLimit = properties->maxThreadsPerMultiprocessor;
1873
+ granularity = properties->warpSize;
1874
+ blockSizeLimit = __occMin(properties->maxThreadsPerBlock, attributes->maxThreadsPerBlock);
1875
+ blockSizeLimitAligned = __occRoundUp(blockSizeLimit, granularity);
1876
+
1877
+ for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) {
1878
+ blockSizeToTry = __occMin(blockSizeLimit, blockSizeToTryAligned);
1879
+
1880
+ dynamicSMemSize = blockSizeToDynamicSMemSize(blockSizeToTry);
1881
+
1882
+ status = cudaOccMaxActiveBlocksPerMultiprocessor(
1883
+ &result,
1884
+ properties,
1885
+ attributes,
1886
+ state,
1887
+ blockSizeToTry,
1888
+ dynamicSMemSize);
1889
+
1890
+ if (status != CUDA_OCC_SUCCESS) {
1891
+ return status;
1892
+ }
1893
+
1894
+ occupancyInBlocks = result.activeBlocksPerMultiprocessor;
1895
+
1896
+ occupancyInThreads = blockSizeToTry * occupancyInBlocks;
1897
+
1898
+ if (occupancyInThreads > maxOccupancy) {
1899
+ maxBlockSize = blockSizeToTry;
1900
+ numBlocks = occupancyInBlocks;
1901
+ maxOccupancy = occupancyInThreads;
1902
+ }
1903
+
1904
+ // Early out if we have reached the maximum
1905
+ //
1906
+ if (occupancyLimit == maxOccupancy) {
1907
+ break;
1908
+ }
1909
+ }
1910
+
1911
+ ///////////////////////////
1912
+ // Return best available
1913
+ ///////////////////////////
1914
+
1915
+ // Suggested min grid size to achieve a full machine launch
1916
+ //
1917
+ *minGridSize = numBlocks * properties->numSms;
1918
+ *blockSize = maxBlockSize;
1919
+
1920
+ return status;
1921
+ }
1922
+
1923
+ } // namespace anonymous
1924
+
1925
+ #endif /*__cplusplus */
1926
+
1927
+ #undef __OCC_INLINE
1928
+
1929
+ #endif /*__cuda_occupancy_h__*/
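
The entry points above make this header usable as a standalone, driver-independent occupancy calculator. A minimal host-side sketch follows; it assumes props, attrs, and state have already been populated from the corresponding device and function queries (that setup is omitted here), and dynSmemPerBlock is a hypothetical mapping from block size to dynamic shared memory usage, not something defined by the header.

    #include <cstdio>
    #include "cuda_occupancy.h"

    // Hypothetical mapping: 4 bytes of dynamic shared memory per thread.
    static size_t dynSmemPerBlock(int blockSize)
    {
        return 4 * (size_t)blockSize;
    }

    static cudaOccError suggestLaunchConfig(const cudaOccDeviceProp *props,
                                            const cudaOccFuncAttributes *attrs,
                                            const cudaOccDeviceState *state)
    {
        int minGridSize = 0;
        int blockSize   = 0;
        // The trailing dynamicSMemSize argument is ignored when a mapping is given.
        cudaOccError status = cudaOccMaxPotentialOccupancyBlockSize(
            &minGridSize, &blockSize, props, attrs, state, dynSmemPerBlock, 0);
        if (status != CUDA_OCC_SUCCESS)
            return status;
        printf("suggested block size: %d, min grid size: %d\n", blockSize, minGridSize);
        return CUDA_OCC_SUCCESS;
    }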
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h ADDED
@@ -0,0 +1,224 @@
+ #ifndef _CUDA_PIPELINE_H_
+ # define _CUDA_PIPELINE_H_
+
+ # include "cuda_pipeline_primitives.h"
+
+ # if !defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER)
+ # error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
+ -std=c++11 compiler option.
+ # endif
+
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
+ # include "cuda_awbarrier.h"
+ # endif
+
+ // Integration with libcu++'s cuda::barrier<cuda::thread_scope_block>.
+
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
+ # if defined(_LIBCUDACXX_CUDA_ABI_VERSION)
+ # define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION _LIBCUDACXX_CUDA_ABI_VERSION
+ # else
+ # define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION 4
+ # endif
+
+ # define _LIBCUDACXX_PIPELINE_CONCAT(X, Y) X ## Y
+ # define _LIBCUDACXX_PIPELINE_CONCAT2(X, Y) _LIBCUDACXX_PIPELINE_CONCAT(X, Y)
+ # define _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE _LIBCUDACXX_PIPELINE_CONCAT2(__, _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION)
+
+ namespace cuda { inline namespace _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE {
+     struct __block_scope_barrier_base;
+ }}
+
+ # endif
+
+ _CUDA_PIPELINE_BEGIN_NAMESPACE
+
+ template<size_t N, typename T>
+ _CUDA_PIPELINE_QUALIFIER
+ auto segment(T* ptr) -> T(*)[N];
+
+ class pipeline {
+ public:
+     pipeline(const pipeline&) = delete;
+     pipeline(pipeline&&) = delete;
+     pipeline& operator=(const pipeline&) = delete;
+     pipeline& operator=(pipeline&&) = delete;
+
+     _CUDA_PIPELINE_QUALIFIER pipeline();
+     _CUDA_PIPELINE_QUALIFIER size_t commit();
+     _CUDA_PIPELINE_QUALIFIER void commit_and_wait();
+     _CUDA_PIPELINE_QUALIFIER void wait(size_t batch);
+     template<unsigned N>
+     _CUDA_PIPELINE_QUALIFIER void wait_prior();
+
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
+     _CUDA_PIPELINE_QUALIFIER void arrive_on(awbarrier& barrier);
+     _CUDA_PIPELINE_QUALIFIER void arrive_on(cuda::__block_scope_barrier_base& barrier);
+ # endif
+
+ private:
+     size_t current_batch;
+ };
+
+ template<class T>
+ _CUDA_PIPELINE_QUALIFIER
+ void memcpy_async(T& dst, const T& src, pipeline& pipe);
+
+ template<class T, size_t DstN, size_t SrcN>
+ _CUDA_PIPELINE_QUALIFIER
+ void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe);
+
+ template<size_t N, typename T>
+ _CUDA_PIPELINE_QUALIFIER
+ auto segment(T* ptr) -> T(*)[N]
+ {
+     return (T(*)[N])ptr;
+ }
+
+ _CUDA_PIPELINE_QUALIFIER
+ pipeline::pipeline()
+     : current_batch(0)
+ {
+ }
+
+ _CUDA_PIPELINE_QUALIFIER
+ size_t pipeline::commit()
+ {
+     _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit();
+     return this->current_batch++;
+ }
+
+ _CUDA_PIPELINE_QUALIFIER
+ void pipeline::commit_and_wait()
+ {
+     (void)pipeline::commit();
+     pipeline::wait_prior<0>();
+ }
+
+ _CUDA_PIPELINE_QUALIFIER
+ void pipeline::wait(size_t batch)
+ {
+     const size_t prior = this->current_batch > batch ? this->current_batch - batch : 0;
+
+     switch (prior) {
+     case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); break;
+     case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); break;
+     case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); break;
+     case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); break;
+     case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); break;
+     case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); break;
+     case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); break;
+     case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); break;
+     default: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); break;
+     }
+ }
+
+ template<unsigned N>
+ _CUDA_PIPELINE_QUALIFIER
+ void pipeline::wait_prior()
+ {
+     _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<N>();
+ }
+
+ # if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
+ _CUDA_PIPELINE_QUALIFIER
+ void pipeline::arrive_on(awbarrier& barrier)
+ {
+     _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(&barrier.barrier);
+ }
+
+ _CUDA_PIPELINE_QUALIFIER
+ void pipeline::arrive_on(cuda::__block_scope_barrier_base& barrier)
+ {
+     _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(reinterpret_cast<uint64_t*>(&barrier));
+ }
+ # endif
+
+ template<class T>
+ _CUDA_PIPELINE_QUALIFIER
+ void memcpy_async(T& dst, const T& src, pipeline& pipe)
+ {
+     _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&src) & (alignof(T) - 1)));
+     _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&dst) & (alignof(T) - 1)));
+
+     if (__is_trivially_copyable(T)) {
+         _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_relaxed<sizeof(T), alignof(T)>(
+             reinterpret_cast<void*>(&dst), reinterpret_cast<const void*>(&src));
+     } else {
+         dst = src;
+     }
+ }
+
+ template<class T, size_t DstN, size_t SrcN>
+ _CUDA_PIPELINE_QUALIFIER
+ void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe)
+ {
+     constexpr size_t dst_size = sizeof(*dst);
+     constexpr size_t src_size = sizeof(*src);
+     static_assert(dst_size == 4 || dst_size == 8 || dst_size == 16, "Unsupported copy size.");
+     static_assert(src_size <= dst_size, "Source size must be less than or equal to destination size.");
+     _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (dst_size - 1)));
+     _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (dst_size - 1)));
+
+     if (__is_trivially_copyable(T)) {
+         _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_strict<sizeof(*dst), sizeof(*src)>(
+             reinterpret_cast<void*>(*dst), reinterpret_cast<const void*>(*src));
+     } else {
+         for (size_t i = 0; i < DstN; ++i) {
+             (*dst)[i] = (i < SrcN) ? (*src)[i] : T();
+         }
+     }
+ }
+
+ _CUDA_PIPELINE_END_NAMESPACE
+
+ #endif /* !_CUDA_PIPELINE_H_ */
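
A hedged device-side sketch of the API above: staging one element per thread from global into shared memory with memcpy_async and a pipeline object. It assumes the namespace macros open nvcuda::experimental (as in the CUDA 11 toolkits this header ships with) and a launch configuration of exactly 256 threads per block covering the input.

    #include "cuda_pipeline.h"

    using namespace nvcuda::experimental;   // assumed expansion of the namespace macros

    __global__ void stageAndScale(const float *in, float *out)
    {
        __shared__ float staging[256];      // one element per thread; 256 threads assumed
        const int i = blockIdx.x * blockDim.x + threadIdx.x;

        pipeline pipe;
        // Queue an asynchronous global->shared copy of this thread's element,
        // then commit the batch and wait until it has arrived.
        memcpy_async(staging[threadIdx.x], in[i], pipe);
        pipe.commit_and_wait();
        __syncthreads();

        out[i] = 2.0f * staging[threadIdx.x];
    }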
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime_api.h ADDED
The diff for this file is too large to render. See raw diff
 
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_vdpau_interop.h ADDED
@@ -0,0 +1,198 @@
+ #if !defined(__CUDA_VDPAU_INTEROP_H__)
+ #define __CUDA_VDPAU_INTEROP_H__
+
+ #include "cuda_runtime_api.h"
+
+ #include <vdpau/vdpau.h>
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif /* __cplusplus */
+
+ /**
+  * \addtogroup CUDART_VDPAU VDPAU Interoperability
+  * This section describes the VDPAU interoperability functions of the CUDA
+  * runtime application programming interface.
+  *
+  * @{
+  */
+
+ /**
+  * \brief Gets the CUDA device associated with a VdpDevice
+  *
+  * Returns the CUDA device associated with a VdpDevice, if applicable.
+  *
+  * \param device            - Returns the device associated with vdpDevice, or -1 if
+  *                            the device associated with vdpDevice is not a compute device.
+  * \param vdpDevice         - A VdpDevice handle
+  * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
+  *
+  * \return
+  * ::cudaSuccess
+  * \notefnerr
+  *
+  * \sa
+  * ::cudaVDPAUSetVDPAUDevice,
+  * ::cuVDPAUGetDevice
+  */
+ extern __host__ cudaError_t CUDARTAPI cudaVDPAUGetDevice(int *device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
+
+ /**
+  * \brief Sets a CUDA device to use VDPAU interoperability
+  *
+  * Records \p vdpDevice as the VdpDevice for VDPAU interoperability
+  * with the CUDA device \p device and sets \p device as the current
+  * device for the calling host thread.
+  *
+  * If \p device has already been initialized then this call will fail
+  * with the error ::cudaErrorSetOnActiveProcess. In this case it is
+  * necessary to reset \p device using ::cudaDeviceReset() before
+  * VDPAU interoperability on \p device may be enabled.
+  *
+  * \param device            - Device to use for VDPAU interoperability
+  * \param vdpDevice         - The VdpDevice to interoperate with
+  * \param vdpGetProcAddress - VDPAU's VdpGetProcAddress function pointer
+  *
+  * \return
+  * ::cudaSuccess,
+  * ::cudaErrorInvalidDevice,
+  * ::cudaErrorSetOnActiveProcess
+  * \notefnerr
+  *
+  * \sa ::cudaGraphicsVDPAURegisterVideoSurface,
+  * ::cudaGraphicsVDPAURegisterOutputSurface,
+  * ::cudaDeviceReset
+  */
+ extern __host__ cudaError_t CUDARTAPI cudaVDPAUSetVDPAUDevice(int device, VdpDevice vdpDevice, VdpGetProcAddress *vdpGetProcAddress);
+
+ /**
+  * \brief Register a VdpVideoSurface object
+  *
+  * Registers the VdpVideoSurface specified by \p vdpSurface for access by CUDA.
+  * A handle to the registered object is returned as \p resource.
+  * The surface's intended usage is specified using \p flags, as follows:
+  *
+  * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this
+  *   resource will be used. It is therefore assumed that this resource will be
+  *   read from and written to by CUDA. This is the default value.
+  * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA
+  *   will not write to this resource.
+  * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that
+  *   CUDA will not read from this resource and will write over the
+  *   entire contents of the resource, so none of the data previously
+  *   stored in the resource will be preserved.
+  *
+  * \param resource   - Pointer to the returned object handle
+  * \param vdpSurface - VDPAU object to be registered
+  * \param flags      - Map flags
+  *
+  * \return
+  * ::cudaSuccess,
+  * ::cudaErrorInvalidDevice,
+  * ::cudaErrorInvalidValue,
+  * ::cudaErrorInvalidResourceHandle,
+  * ::cudaErrorUnknown
+  * \notefnerr
+  *
+  * \sa
+  * ::cudaVDPAUSetVDPAUDevice,
+  * ::cudaGraphicsUnregisterResource,
+  * ::cudaGraphicsSubResourceGetMappedArray,
+  * ::cuGraphicsVDPAURegisterVideoSurface
+  */
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterVideoSurface(struct cudaGraphicsResource **resource, VdpVideoSurface vdpSurface, unsigned int flags);
+
+ /**
+  * \brief Register a VdpOutputSurface object
+  *
+  * Registers the VdpOutputSurface specified by \p vdpSurface for access by CUDA.
+  * A handle to the registered object is returned as \p resource.
+  * The surface's intended usage is specified using \p flags, as follows:
+  *
+  * - ::cudaGraphicsMapFlagsNone: Specifies no hints about how this
+  *   resource will be used. It is therefore assumed that this resource will be
+  *   read from and written to by CUDA. This is the default value.
+  * - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA
+  *   will not write to this resource.
+  * - ::cudaGraphicsMapFlagsWriteDiscard: Specifies that
+  *   CUDA will not read from this resource and will write over the
+  *   entire contents of the resource, so none of the data previously
+  *   stored in the resource will be preserved.
+  *
+  * \param resource   - Pointer to the returned object handle
+  * \param vdpSurface - VDPAU object to be registered
+  * \param flags      - Map flags
+  *
+  * \return
+  * ::cudaSuccess,
+  * ::cudaErrorInvalidDevice,
+  * ::cudaErrorInvalidValue,
+  * ::cudaErrorInvalidResourceHandle,
+  * ::cudaErrorUnknown
+  * \notefnerr
+  *
+  * \sa
+  * ::cudaVDPAUSetVDPAUDevice,
+  * ::cudaGraphicsUnregisterResource,
+  * ::cudaGraphicsSubResourceGetMappedArray,
+  * ::cuGraphicsVDPAURegisterOutputSurface
+  */
+ extern __host__ cudaError_t CUDARTAPI cudaGraphicsVDPAURegisterOutputSurface(struct cudaGraphicsResource **resource, VdpOutputSurface vdpSurface, unsigned int flags);
+
+ /** @} */ /* END CUDART_VDPAU */
+
+ #if defined(__cplusplus)
+ }
+ #endif /* __cplusplus */
+
+ #endif /* __CUDA_VDPAU_INTEROP_H__ */
+
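
The registration functions above are typically used in a register/map/unmap cycle. A hedged host-side sketch, assuming vdpDevice, vdpGetProcAddress, and outputSurface were created through the VDPAU API beforehand; the map/unmap calls come from cuda_runtime_api.h.

    #include "cuda_vdpau_interop.h"

    static cudaError_t readBackOutputSurface(VdpDevice vdpDevice,
                                             VdpGetProcAddress *vdpGetProcAddress,
                                             VdpOutputSurface outputSurface)
    {
        int device = -1;
        cudaError_t err = cudaVDPAUGetDevice(&device, vdpDevice, vdpGetProcAddress);
        if (err != cudaSuccess || device < 0)
            return (err != cudaSuccess) ? err : cudaErrorInvalidDevice;

        err = cudaVDPAUSetVDPAUDevice(device, vdpDevice, vdpGetProcAddress);
        if (err != cudaSuccess)
            return err;

        struct cudaGraphicsResource *resource = 0;
        err = cudaGraphicsVDPAURegisterOutputSurface(&resource, outputSurface,
                                                     cudaGraphicsMapFlagsReadOnly);
        if (err != cudaSuccess)
            return err;

        err = cudaGraphicsMapResources(1, &resource, 0);
        if (err == cudaSuccess) {
            cudaArray_t array = 0;
            // While mapped, the surface contents are accessible as a CUDA array.
            err = cudaGraphicsSubResourceGetMappedArray(&array, resource, 0, 0);
            /* ... copy out of 'array', e.g. with cudaMemcpy2DFromArray ... */
            cudaGraphicsUnmapResources(1, &resource, 0);
        }
        cudaGraphicsUnregisterResource(resource);
        return err;
    }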
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cudart_platform.h ADDED
@@ -0,0 +1,57 @@
+ #ifndef __CUDART_PLATFORM_H__
+ #define __CUDART_PLATFORM_H__
+
+ #if ((defined(__linux__) || defined(__QNX__)) && (defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)))
+ #define isEglSupported 1
+ #endif
+
+ #endif
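
A minimal usage sketch: gating EGL-specific code on the platform macro defined above (cuda_egl_interop.h ships in the same include directory).

    #include "cudart_platform.h"

    #if defined(isEglSupported)
    #include "cuda_egl_interop.h"   /* EGL interop paths compile only where supported */
    #endif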
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h ADDED
@@ -0,0 +1,118 @@
+ #if !defined(__DEVICE_LAUNCH_PARAMETERS_H__)
+ #define __DEVICE_LAUNCH_PARAMETERS_H__
+
+ #include "vector_types.h"
+
+ #if !defined(__STORAGE__)
+
+ #if defined(__CUDACC_RTC__)
+ #define __STORAGE__ \
+         extern const __device__
+ #else /* !__CUDACC_RTC__ */
+ #define __STORAGE__ \
+         extern const
+ #endif /* __CUDACC_RTC__ */
+
+ #endif /* __STORAGE__ */
+
+ #if defined(__cplusplus)
+ extern "C" {
+ #endif /* __cplusplus */
+
+ uint3 __device_builtin__ __STORAGE__ threadIdx;
+ uint3 __device_builtin__ __STORAGE__ blockIdx;
+ dim3  __device_builtin__ __STORAGE__ blockDim;
+ dim3  __device_builtin__ __STORAGE__ gridDim;
+ int   __device_builtin__ __STORAGE__ warpSize;
+
+ #undef __STORAGE__
+
+ #if defined(__cplusplus)
+ }
+ #endif /* __cplusplus */
+
+ #if !defined(__cudaGet_threadIdx)
+
+ #define __cudaGet_threadIdx() \
+         threadIdx
+
+ #endif /* __cudaGet_threadIdx */
+
+ #if !defined(__cudaGet_blockIdx)
+
+ #define __cudaGet_blockIdx() \
+         blockIdx
+
+ #endif /* __cudaGet_blockIdx */
+
+ #if !defined(__cudaGet_blockDim)
+
+ #define __cudaGet_blockDim() \
+         blockDim
+
+ #endif /* __cudaGet_blockDim */
+
+ #if !defined(__cudaGet_gridDim)
+
+ #define __cudaGet_gridDim() \
+         gridDim
+
+ #endif /* __cudaGet_gridDim */
+
+ #if !defined(__cudaGet_warpSize)
+
+ #define __cudaGet_warpSize() \
+         warpSize
+
+ #endif /* __cudaGet_warpSize */
+
+ #endif /* !__DEVICE_LAUNCH_PARAMETERS_H__ */
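
A minimal kernel sketch showing how the built-ins declared above combine into a global thread index and a grid-stride step:

    __global__ void scale(float *data, int n, float s)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;   // global thread index
        int stride = gridDim.x * blockDim.x;             // total threads in the grid
        for (; i < n; i += stride)
            data[i] *= s;
    }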
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/driver_types.h ADDED
The diff for this file is too large to render. See raw diff
 
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/host_config.h ADDED
@@ -0,0 +1,65 @@
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+ #if defined(_MSC_VER)
+ #pragma message("host_config.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+ #else
+ #warning "host_config.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+ #endif
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__
+ #endif
+
+ #include "crt/host_config.h"
+
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__)
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_HOST_CONFIG_H_WRAPPER__
+ #endif
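
In line with the warning above, user code should include the public headers rather than this internal wrapper; a one-line sketch:

    #include <cuda_runtime.h>   /* public header; pulls in crt/host_config.h internally */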
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/library_types.h ADDED
@@ -0,0 +1,105 @@
+ #if !defined(__LIBRARY_TYPES_H__)
+ #define __LIBRARY_TYPES_H__
+
+ typedef enum cudaDataType_t
+ {
+     CUDA_R_16F  =  2, /* real as a half */
+     CUDA_C_16F  =  6, /* complex as a pair of half numbers */
+     CUDA_R_16BF = 14, /* real as a nv_bfloat16 */
+     CUDA_C_16BF = 15, /* complex as a pair of nv_bfloat16 numbers */
+     CUDA_R_32F  =  0, /* real as a float */
+     CUDA_C_32F  =  4, /* complex as a pair of float numbers */
+     CUDA_R_64F  =  1, /* real as a double */
+     CUDA_C_64F  =  5, /* complex as a pair of double numbers */
+     CUDA_R_4I   = 16, /* real as a signed 4-bit int */
+     CUDA_C_4I   = 17, /* complex as a pair of signed 4-bit int numbers */
+     CUDA_R_4U   = 18, /* real as an unsigned 4-bit int */
+     CUDA_C_4U   = 19, /* complex as a pair of unsigned 4-bit int numbers */
+     CUDA_R_8I   =  3, /* real as a signed 8-bit int */
+     CUDA_C_8I   =  7, /* complex as a pair of signed 8-bit int numbers */
+     CUDA_R_8U   =  8, /* real as an unsigned 8-bit int */
+     CUDA_C_8U   =  9, /* complex as a pair of unsigned 8-bit int numbers */
+     CUDA_R_16I  = 20, /* real as a signed 16-bit int */
+     CUDA_C_16I  = 21, /* complex as a pair of signed 16-bit int numbers */
+     CUDA_R_16U  = 22, /* real as an unsigned 16-bit int */
+     CUDA_C_16U  = 23, /* complex as a pair of unsigned 16-bit int numbers */
+     CUDA_R_32I  = 10, /* real as a signed 32-bit int */
+     CUDA_C_32I  = 11, /* complex as a pair of signed 32-bit int numbers */
+     CUDA_R_32U  = 12, /* real as an unsigned 32-bit int */
+     CUDA_C_32U  = 13, /* complex as a pair of unsigned 32-bit int numbers */
+     CUDA_R_64I  = 24, /* real as a signed 64-bit int */
+     CUDA_C_64I  = 25, /* complex as a pair of signed 64-bit int numbers */
+     CUDA_R_64U  = 26, /* real as an unsigned 64-bit int */
+     CUDA_C_64U  = 27  /* complex as a pair of unsigned 64-bit int numbers */
+ } cudaDataType;
+
+ typedef enum libraryPropertyType_t
+ {
+     MAJOR_VERSION,
+     MINOR_VERSION,
+     PATCH_LEVEL
+ } libraryPropertyType;
+
+ #ifndef __cplusplus
+ typedef enum cudaDataType_t cudaDataType_t;
+ typedef enum libraryPropertyType_t libraryPropertyType_t;
+ #endif
+
+ #endif /* !__LIBRARY_TYPES_H__ */
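
A small self-contained sketch of how the enum is typically consumed: mapping the real-valued cudaDataType tags defined above to element sizes in bytes (the matching complex tag is twice as wide). The helper name is illustrative, not part of the header.

    #include <stddef.h>
    #include "library_types.h"

    static size_t realElementSize(cudaDataType t)
    {
        switch (t) {
        case CUDA_R_8I:  case CUDA_R_8U:                    return 1;
        case CUDA_R_16F: case CUDA_R_16BF:
        case CUDA_R_16I: case CUDA_R_16U:                   return 2;
        case CUDA_R_32F: case CUDA_R_32I: case CUDA_R_32U:  return 4;
        case CUDA_R_64F: case CUDA_R_64I: case CUDA_R_64U:  return 8;
        default: /* 4-bit and complex tags not handled */   return 0;
        }
    }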
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/math_functions.h ADDED
@@ -0,0 +1,65 @@
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+ #if defined(_MSC_VER)
+ #pragma message("math_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
+ #else
+ #warning "math_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
+ #endif
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H_WRAPPER__
+ #endif
+
+ #include "crt/math_functions.h"
+
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H_WRAPPER__)
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_MATH_FUNCTIONS_H_WRAPPER__
+ #endif
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/mma.h ADDED
@@ -0,0 +1,60 @@
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__
+ #endif
+
+ #include "crt/mma.h"
+
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__)
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_MMA_H_WRAPPER__
+ #endif
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h ADDED
@@ -0,0 +1,215 @@
50
+ #if !defined(__SM_30_INTRINSICS_H__)
51
+ #define __SM_30_INTRINSICS_H__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_30_INTRINSICS_DECL__ __device__
55
+ #else /* !__CUDACC_RTC__ */
56
+ #define __SM_30_INTRINSICS_DECL__ static __device__ __inline__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ #ifndef __CUDA_ARCH__
72
+ #define __DEF_IF_HOST { }
73
+ #else /* !__CUDA_ARCH__ */
74
+ #define __DEF_IF_HOST ;
75
+ #endif /* __CUDA_ARCH__ */
76
+
77
+
78
+ /*******************************************************************************
79
+ * *
80
+ * Below are declarations of SM-3.0 intrinsics which are included as *
81
+ * source (instead of being built in to the compiler) *
82
+ * *
83
+ *******************************************************************************/
84
+
85
+ #if !defined warpSize && !defined __local_warpSize
86
+ #define warpSize 32
87
+ #define __local_warpSize
88
+ #endif
89
+
90
+ #if defined(_WIN32)
91
+ # define __DEPRECATED__(msg) __declspec(deprecated(msg))
92
+ #elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__))))
93
+ # define __DEPRECATED__(msg) __attribute__((deprecated))
94
+ #else
95
+ # define __DEPRECATED__(msg) __attribute__((deprecated(msg)))
96
+ #endif
97
+
98
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
99
+ #define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)."
100
+ #endif
101
+
102
+ __SM_30_INTRINSICS_DECL__ unsigned __fns(unsigned mask, unsigned base, int offset) __DEF_IF_HOST
103
+ __SM_30_INTRINSICS_DECL__ void __barrier_sync(unsigned id) __DEF_IF_HOST
104
+ __SM_30_INTRINSICS_DECL__ void __barrier_sync_count(unsigned id, unsigned cnt) __DEF_IF_HOST
105
+ __SM_30_INTRINSICS_DECL__ void __syncwarp(unsigned mask=0xFFFFFFFF) __DEF_IF_HOST
106
+ __SM_30_INTRINSICS_DECL__ int __all_sync(unsigned mask, int pred) __DEF_IF_HOST
107
+ __SM_30_INTRINSICS_DECL__ int __any_sync(unsigned mask, int pred) __DEF_IF_HOST
108
+ __SM_30_INTRINSICS_DECL__ int __uni_sync(unsigned mask, int pred) __DEF_IF_HOST
109
+ __SM_30_INTRINSICS_DECL__ unsigned __ballot_sync(unsigned mask, int pred) __DEF_IF_HOST
110
+ __SM_30_INTRINSICS_DECL__ unsigned __activemask() __DEF_IF_HOST
111
+
112
+ // Warp register exchange (shuffle) intrinsics.
113
+ // Notes:
114
+ // a) Warp size is hardcoded to 32 here, because the compiler does not know
115
+ // the "warpSize" constant at this time
116
+ // b) we cannot map the float __shfl to the int __shfl because it'll mess with
117
+ // the register number (especially if you're doing two shfls to move a double).
118
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
119
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) int __shfl(int var, int srcLane, int width=warpSize) __DEF_IF_HOST
120
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned int __shfl(unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST
121
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) int __shfl_up(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
122
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned int __shfl_up(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
123
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) int __shfl_down(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
124
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned int __shfl_down(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
125
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) int __shfl_xor(int var, int laneMask, int width=warpSize) __DEF_IF_HOST
126
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned int __shfl_xor(unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST
127
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) float __shfl(float var, int srcLane, int width=warpSize) __DEF_IF_HOST
128
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) float __shfl_up(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
129
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) float __shfl_down(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
130
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) float __shfl_xor(float var, int laneMask, int width=warpSize) __DEF_IF_HOST
131
+ #endif
132
+
133
+ __SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width=warpSize) __DEF_IF_HOST
134
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST
135
+ __SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
136
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
137
+ __SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
138
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
139
+ __SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width=warpSize) __DEF_IF_HOST
140
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST
141
+ __SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width=warpSize) __DEF_IF_HOST
142
+ __SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
143
+ __SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
144
+ __SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width=warpSize) __DEF_IF_HOST
145
+
146
+ // 64-bit SHFL
147
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
148
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long long __shfl(unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
149
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long long __shfl(long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
150
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long long __shfl_up(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
151
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
152
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long long __shfl_down(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
153
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
154
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long long __shfl_xor(long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
155
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
156
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) double __shfl(double var, int srcLane, int width=warpSize) __DEF_IF_HOST
157
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) double __shfl_up(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
158
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) double __shfl_down(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
159
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) double __shfl_xor(double var, int laneMask, int width=warpSize) __DEF_IF_HOST
160
+ #endif
161
+
162
+ __SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
163
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST
164
+ __SM_30_INTRINSICS_DECL__ long long __shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
165
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
166
+ __SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
167
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
168
+ __SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
169
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST
170
+ __SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width=warpSize) __DEF_IF_HOST
171
+ __SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
172
+ __SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
173
+ __SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width=warpSize) __DEF_IF_HOST
174
+
175
+ // long needs some help to choose between 32 and 64 bits
176
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
177
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long __shfl(long var, int srcLane, int width=warpSize) __DEF_IF_HOST
178
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long __shfl(unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST
179
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long __shfl_up(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
180
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long __shfl_up(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
181
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long __shfl_down(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
182
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long __shfl_down(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
183
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long __shfl_xor(long var, int laneMask, int width=warpSize) __DEF_IF_HOST
184
+ __SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long __shfl_xor(unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST
185
+ #endif
186
+
187
+ __SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width=warpSize) __DEF_IF_HOST
188
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST
189
+ __SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
190
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
191
+ __SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
192
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST
193
+ __SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width=warpSize) __DEF_IF_HOST
194
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST
195
+
196
+ #undef __DEPRECATED__
197
+ #undef __WSB_DEPRECATION_MESSAGE
198
+
199
+ #if defined(__local_warpSize)
200
+ #undef warpSize
201
+ #undef __local_warpSize
202
+ #endif
203
+
204
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */
205
+
206
+ #endif /* __cplusplus && __CUDACC__ */
207
+
208
+ #undef __DEF_IF_HOST
209
+ #undef __SM_30_INTRINSICS_DECL__
210
+
211
+ #if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
212
+ #include "sm_30_intrinsics.hpp"
213
+ #endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
214
+
215
+ #endif /* !__SM_30_INTRINSICS_H__ */
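
The *_sync shuffle intrinsics declared above are the usual building block for intra-warp reductions. As a minimal sketch (the kernel and names below are illustrative, not part of the header), a warp-wide sum with __shfl_down_sync, assuming a full 32-lane warp is active:

// Illustrative kernel (not from this header): reduce one float per lane
// to a warp-wide sum. Assumes blockDim.x == 32 and all lanes active
// (mask 0xFFFFFFFF names all 32 lanes).
__global__ void warpReduceSum(const float *in, float *out)
{
    float v = in[threadIdx.x];
    // Fold the upper half of the active lanes onto the lower half,
    // halving the stride each step.
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xFFFFFFFF, v, offset);
    if (threadIdx.x == 0)
        *out = v;   // lane 0 holds the total after five folds
}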
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp ADDED
@@ -0,0 +1,604 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_30_INTRINSICS_HPP__)
51
+ #define __SM_30_INTRINSICS_HPP__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_30_INTRINSICS_DECL__ __device__
55
+ #else /* !__CUDACC_RTC__ */
56
+ #define __SM_30_INTRINSICS_DECL__ static __device__ __inline__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ // Declared here are intrinsics that are built in to the compiler. These may be
72
+ // referenced by the intrinsic implementations in this file.
73
+ extern "C"
74
+ {
75
+ }
76
+
77
+ /*******************************************************************************
78
+ * *
79
+ * Below are implementations of SM-3.0 intrinsics which are included as *
80
+ * source (instead of being built in to the compiler) *
81
+ * *
82
+ *******************************************************************************/
83
+
84
+ #if !defined warpSize && !defined __local_warpSize
85
+ #define warpSize 32
86
+ #define __local_warpSize
87
+ #endif
88
+
89
+ __SM_30_INTRINSICS_DECL__
90
+ unsigned __fns(unsigned mask, unsigned base, int offset) {
91
+ extern __device__ __device_builtin__ unsigned int __nvvm_fns(unsigned int mask, unsigned int base, int offset);
92
+ return __nvvm_fns(mask, base, offset);
93
+ }
94
+
95
+ __SM_30_INTRINSICS_DECL__
96
+ void __barrier_sync(unsigned id) {
97
+ extern __device__ __device_builtin__ void __nvvm_barrier_sync(unsigned id);
98
+ return __nvvm_barrier_sync(id);
99
+ }
100
+
101
+ __SM_30_INTRINSICS_DECL__
102
+ void __barrier_sync_count(unsigned id, unsigned cnt) {
103
+ extern __device__ __device_builtin__ void __nvvm_barrier_sync_cnt(unsigned id, unsigned cnt);
104
+ return __nvvm_barrier_sync_cnt(id, cnt);
105
+ }
106
+
107
+ __SM_30_INTRINSICS_DECL__
108
+ void __syncwarp(unsigned mask) {
109
+ extern __device__ __device_builtin__ void __nvvm_bar_warp_sync(unsigned mask);
110
+ return __nvvm_bar_warp_sync(mask);
111
+ }
112
+
113
+ __SM_30_INTRINSICS_DECL__
114
+ int __all_sync(unsigned mask, int pred) {
115
+ extern __device__ __device_builtin__ int __nvvm_vote_all_sync(unsigned int mask, int pred);
116
+ return __nvvm_vote_all_sync(mask, pred);
117
+ }
118
+
119
+ __SM_30_INTRINSICS_DECL__
120
+ int __any_sync(unsigned mask, int pred) {
121
+ extern __device__ __device_builtin__ int __nvvm_vote_any_sync(unsigned int mask, int pred);
122
+ return __nvvm_vote_any_sync(mask, pred);
123
+ }
124
+
125
+ __SM_30_INTRINSICS_DECL__
126
+ int __uni_sync(unsigned mask, int pred) {
127
+ extern __device__ __device_builtin__ int __nvvm_vote_uni_sync(unsigned int mask, int pred);
128
+ return __nvvm_vote_uni_sync(mask, pred);
129
+ }
130
+
131
+ __SM_30_INTRINSICS_DECL__
132
+ unsigned __ballot_sync(unsigned mask, int pred) {
133
+ extern __device__ __device_builtin__ unsigned int __nvvm_vote_ballot_sync(unsigned int mask, int pred);
134
+ return __nvvm_vote_ballot_sync(mask, pred);
135
+ }
136
+
137
+ __SM_30_INTRINSICS_DECL__
138
+ unsigned __activemask() {
139
+ unsigned ret;
140
+ asm volatile ("activemask.b32 %0;" : "=r"(ret));
141
+ return ret;
142
+ }
143
+
144
+ // These are removed starting with compute_70 and onwards
145
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
146
+
147
+ __SM_30_INTRINSICS_DECL__ int __shfl(int var, int srcLane, int width) {
148
+ int ret;
149
+ int c = ((warpSize-width) << 8) | 0x1f;
150
+ asm volatile ("shfl.idx.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(srcLane), "r"(c));
151
+ return ret;
152
+ }
153
+
154
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl(unsigned int var, int srcLane, int width) {
155
+ return (unsigned int) __shfl((int)var, srcLane, width);
156
+ }
157
+
158
+ __SM_30_INTRINSICS_DECL__ int __shfl_up(int var, unsigned int delta, int width) {
159
+ int ret;
160
+ int c = (warpSize-width) << 8;
161
+ asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c));
162
+ return ret;
163
+ }
164
+
165
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_up(unsigned int var, unsigned int delta, int width) {
166
+ return (unsigned int) __shfl_up((int)var, delta, width);
167
+ }
168
+
169
+ __SM_30_INTRINSICS_DECL__ int __shfl_down(int var, unsigned int delta, int width) {
170
+ int ret;
171
+ int c = ((warpSize-width) << 8) | 0x1f;
172
+ asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c));
173
+ return ret;
174
+ }
175
+
176
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_down(unsigned int var, unsigned int delta, int width) {
177
+ return (unsigned int) __shfl_down((int)var, delta, width);
178
+ }
179
+
180
+ __SM_30_INTRINSICS_DECL__ int __shfl_xor(int var, int laneMask, int width) {
181
+ int ret;
182
+ int c = ((warpSize-width) << 8) | 0x1f;
183
+ asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(laneMask), "r"(c));
184
+ return ret;
185
+ }
186
+
187
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor(unsigned int var, int laneMask, int width) {
188
+ return (unsigned int) __shfl_xor((int)var, laneMask, width);
189
+ }
190
+
191
+ __SM_30_INTRINSICS_DECL__ float __shfl(float var, int srcLane, int width) {
192
+ float ret;
193
+ int c;
194
+ c = ((warpSize-width) << 8) | 0x1f;
195
+ asm volatile ("shfl.idx.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(srcLane), "r"(c));
196
+ return ret;
197
+ }
198
+
199
+ __SM_30_INTRINSICS_DECL__ float __shfl_up(float var, unsigned int delta, int width) {
200
+ float ret;
201
+ int c;
202
+ c = (warpSize-width) << 8;
203
+ asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c));
204
+ return ret;
205
+ }
206
+
207
+ __SM_30_INTRINSICS_DECL__ float __shfl_down(float var, unsigned int delta, int width) {
208
+ float ret;
209
+ int c;
210
+ c = ((warpSize-width) << 8) | 0x1f;
211
+ asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c));
212
+ return ret;
213
+ }
214
+
215
+ __SM_30_INTRINSICS_DECL__ float __shfl_xor(float var, int laneMask, int width) {
216
+ float ret;
217
+ int c;
218
+ c = ((warpSize-width) << 8) | 0x1f;
219
+ asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(laneMask), "r"(c));
220
+ return ret;
221
+ }
222
+
223
+ // 64-bit SHFL
224
+
225
+ __SM_30_INTRINSICS_DECL__ long long __shfl(long long var, int srcLane, int width) {
226
+ int lo, hi;
227
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
228
+ hi = __shfl(hi, srcLane, width);
229
+ lo = __shfl(lo, srcLane, width);
230
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
231
+ return var;
232
+ }
233
+
234
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl(unsigned long long var, int srcLane, int width) {
235
+ return (unsigned long long) __shfl((long long) var, srcLane, width);
236
+ }
237
+
238
+ __SM_30_INTRINSICS_DECL__ long long __shfl_up(long long var, unsigned int delta, int width) {
239
+ int lo, hi;
240
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
241
+ hi = __shfl_up(hi, delta, width);
242
+ lo = __shfl_up(lo, delta, width);
243
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
244
+ return var;
245
+ }
246
+
247
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width) {
248
+ return (unsigned long long) __shfl_up((long long) var, delta, width);
249
+ }
250
+
251
+ __SM_30_INTRINSICS_DECL__ long long __shfl_down(long long var, unsigned int delta, int width) {
252
+ int lo, hi;
253
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
254
+ hi = __shfl_down(hi, delta, width);
255
+ lo = __shfl_down(lo, delta, width);
256
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
257
+ return var;
258
+ }
259
+
260
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width) {
261
+ return (unsigned long long) __shfl_down((long long) var, delta, width);
262
+ }
263
+
264
+ __SM_30_INTRINSICS_DECL__ long long __shfl_xor(long long var, int laneMask, int width) {
265
+ int lo, hi;
266
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
267
+ hi = __shfl_xor(hi, laneMask, width);
268
+ lo = __shfl_xor(lo, laneMask, width);
269
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
270
+ return var;
271
+ }
272
+
273
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width) {
274
+ return (unsigned long long) __shfl_xor((long long) var, laneMask, width);
275
+ }
276
+
277
+ __SM_30_INTRINSICS_DECL__ double __shfl(double var, int srcLane, int width) {
278
+ unsigned lo, hi;
279
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
280
+ hi = __shfl(hi, srcLane, width);
281
+ lo = __shfl(lo, srcLane, width);
282
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
283
+ return var;
284
+ }
285
+
286
+ __SM_30_INTRINSICS_DECL__ double __shfl_up(double var, unsigned int delta, int width) {
287
+ unsigned lo, hi;
288
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
289
+ hi = __shfl_up(hi, delta, width);
290
+ lo = __shfl_up(lo, delta, width);
291
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
292
+ return var;
293
+ }
294
+
295
+ __SM_30_INTRINSICS_DECL__ double __shfl_down(double var, unsigned int delta, int width) {
296
+ unsigned lo, hi;
297
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
298
+ hi = __shfl_down(hi, delta, width);
299
+ lo = __shfl_down(lo, delta, width);
300
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
301
+ return var;
302
+ }
303
+
304
+ __SM_30_INTRINSICS_DECL__ double __shfl_xor(double var, int laneMask, int width) {
305
+ unsigned lo, hi;
306
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
307
+ hi = __shfl_xor(hi, laneMask, width);
308
+ lo = __shfl_xor(lo, laneMask, width);
309
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
310
+ return var;
311
+ }
312
+
313
+ __SM_30_INTRINSICS_DECL__ long __shfl(long var, int srcLane, int width) {
314
+ return (sizeof(long) == sizeof(long long)) ?
315
+ __shfl((long long) var, srcLane, width) :
316
+ __shfl((int) var, srcLane, width);
317
+ }
318
+
319
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl(unsigned long var, int srcLane, int width) {
320
+ return (sizeof(long) == sizeof(long long)) ?
321
+ __shfl((unsigned long long) var, srcLane, width) :
322
+ __shfl((unsigned int) var, srcLane, width);
323
+ }
324
+
325
+ __SM_30_INTRINSICS_DECL__ long __shfl_up(long var, unsigned int delta, int width) {
326
+ return (sizeof(long) == sizeof(long long)) ?
327
+ __shfl_up((long long) var, delta, width) :
328
+ __shfl_up((int) var, delta, width);
329
+ }
330
+
331
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_up(unsigned long var, unsigned int delta, int width) {
332
+ return (sizeof(long) == sizeof(long long)) ?
333
+ __shfl_up((unsigned long long) var, delta, width) :
334
+ __shfl_up((unsigned int) var, delta, width);
335
+ }
336
+
337
+ __SM_30_INTRINSICS_DECL__ long __shfl_down(long var, unsigned int delta, int width) {
338
+ return (sizeof(long) == sizeof(long long)) ?
339
+ __shfl_down((long long) var, delta, width) :
340
+ __shfl_down((int) var, delta, width);
341
+ }
342
+
343
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_down(unsigned long var, unsigned int delta, int width) {
344
+ return (sizeof(long) == sizeof(long long)) ?
345
+ __shfl_down((unsigned long long) var, delta, width) :
346
+ __shfl_down((unsigned int) var, delta, width);
347
+ }
348
+
349
+ __SM_30_INTRINSICS_DECL__ long __shfl_xor(long var, int laneMask, int width) {
350
+ return (sizeof(long) == sizeof(long long)) ?
351
+ __shfl_xor((long long) var, laneMask, width) :
352
+ __shfl_xor((int) var, laneMask, width);
353
+ }
354
+
355
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor(unsigned long var, int laneMask, int width) {
356
+ return (sizeof(long) == sizeof(long long)) ?
357
+ __shfl_xor((unsigned long long) var, laneMask, width) :
358
+ __shfl_xor((unsigned int) var, laneMask, width);
359
+ }
360
+
361
+ #endif /* !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */
362
+
363
+ // Warp register exchange (shuffle) intrinsics.
364
+ // Notes:
365
+ // a) Warp size is hardcoded to 32 here, because the compiler does not know
366
+ // the "warpSize" constant at this time
367
+ // b) we cannot map the float __shfl to the int __shfl because it'll mess with
368
+ // the register number (especially if you're doing two shfls to move a double).
369
+ __SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width) {
370
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
371
+ int ret;
372
+ int c = ((warpSize-width) << 8) | 0x1f;
373
+ ret = __nvvm_shfl_idx_sync(mask, var, srcLane, c);
374
+ return ret;
375
+ }
376
+
377
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width) {
378
+ return (unsigned int) __shfl_sync(mask, (int)var, srcLane, width);
379
+ }
380
+
381
+ __SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width) {
382
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
383
+ int ret;
384
+ int c = (warpSize-width) << 8;
385
+ ret = __nvvm_shfl_up_sync(mask, var, delta, c);
386
+ return ret;
387
+ }
388
+
389
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned mask, unsigned int var, unsigned int delta, int width) {
390
+ return (unsigned int) __shfl_up_sync(mask, (int)var, delta, width);
391
+ }
392
+
393
+ __SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width) {
394
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
395
+ int ret;
396
+ int c = ((warpSize-width) << 8) | 0x1f;
397
+ ret = __nvvm_shfl_down_sync(mask, var, delta, c);
398
+ return ret;
399
+ }
400
+
401
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width) {
402
+ return (unsigned int) __shfl_down_sync(mask, (int)var, delta, width);
403
+ }
404
+
405
+ __SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width) {
406
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
407
+ int ret;
408
+ int c = ((warpSize-width) << 8) | 0x1f;
409
+ ret = __nvvm_shfl_bfly_sync(mask, var, laneMask, c);
410
+ return ret;
411
+ }
412
+
413
+ __SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width) {
414
+ return (unsigned int) __shfl_xor_sync(mask, (int)var, laneMask, width);
415
+ }
416
+
417
+ __SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width) {
418
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
419
+ int ret;
420
+ int c;
421
+ c = ((warpSize-width) << 8) | 0x1f;
422
+ ret = __nvvm_shfl_idx_sync(mask, __float_as_int(var), srcLane, c);
423
+ return __int_as_float(ret);
424
+ }
425
+
426
+ __SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width) {
427
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
428
+ int ret;
429
+ int c;
430
+ c = (warpSize-width) << 8;
431
+ ret = __nvvm_shfl_up_sync(mask, __float_as_int(var), delta, c);
432
+ return __int_as_float(ret);
433
+ }
434
+
435
+ __SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width) {
436
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
437
+ int ret;
438
+ int c;
439
+ c = ((warpSize-width) << 8) | 0x1f;
440
+ ret = __nvvm_shfl_down_sync(mask, __float_as_int(var), delta, c);
441
+ return __int_as_float(ret);
442
+ }
443
+
444
+ __SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width) {
445
+ extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c);
446
+ int ret;
447
+ int c;
448
+ c = ((warpSize-width) << 8) | 0x1f;
449
+ ret = __nvvm_shfl_bfly_sync(mask, __float_as_int(var), laneMask, c);
450
+ return __int_as_float(ret);
451
+ }
452
+
453
+ // 64-bit SHFL
454
+ __SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width) {
455
+ int lo, hi;
456
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
457
+ hi = __shfl_sync(mask, hi, srcLane, width);
458
+ lo = __shfl_sync(mask, lo, srcLane, width);
459
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
460
+ return var;
461
+ }
462
+
463
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width) {
464
+ return (unsigned long long) __shfl_sync(mask, (long long) var, srcLane, width);
465
+ }
466
+
467
+ __SM_30_INTRINSICS_DECL__ long long __shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width) {
468
+ int lo, hi;
469
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
470
+ hi = __shfl_up_sync(mask, hi, delta, width);
471
+ lo = __shfl_up_sync(mask, lo, delta, width);
472
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
473
+ return var;
474
+ }
475
+
476
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) {
477
+ return (unsigned long long) __shfl_up_sync(mask, (long long) var, delta, width);
478
+ }
479
+
480
+ __SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width) {
481
+ int lo, hi;
482
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
483
+ hi = __shfl_down_sync(mask, hi, delta, width);
484
+ lo = __shfl_down_sync(mask, lo, delta, width);
485
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
486
+ return var;
487
+ }
488
+
489
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) {
490
+ return (unsigned long long) __shfl_down_sync(mask, (long long) var, delta, width);
491
+ }
492
+
493
+ __SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width) {
494
+ int lo, hi;
495
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
496
+ hi = __shfl_xor_sync(mask, hi, laneMask, width);
497
+ lo = __shfl_xor_sync(mask, lo, laneMask, width);
498
+ asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi));
499
+ return var;
500
+ }
501
+
502
+ __SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width) {
503
+ return (unsigned long long) __shfl_xor_sync(mask, (long long) var, laneMask, width);
504
+ }
505
+
506
+ __SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width) {
507
+ unsigned lo, hi;
508
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
509
+ hi = __shfl_sync(mask, hi, srcLane, width);
510
+ lo = __shfl_sync(mask, lo, srcLane, width);
511
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
512
+ return var;
513
+ }
514
+
515
+ __SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width) {
516
+ unsigned lo, hi;
517
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
518
+ hi = __shfl_up_sync(mask, hi, delta, width);
519
+ lo = __shfl_up_sync(mask, lo, delta, width);
520
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
521
+ return var;
522
+ }
523
+
524
+ __SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width) {
525
+ unsigned lo, hi;
526
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
527
+ hi = __shfl_down_sync(mask, hi, delta, width);
528
+ lo = __shfl_down_sync(mask, lo, delta, width);
529
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
530
+ return var;
531
+ }
532
+
533
+ __SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width) {
534
+ unsigned lo, hi;
535
+ asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
536
+ hi = __shfl_xor_sync(mask, hi, laneMask, width);
537
+ lo = __shfl_xor_sync(mask, lo, laneMask, width);
538
+ asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi));
539
+ return var;
540
+ }
541
+
542
+ // long needs some help to choose between 32 and 64 bits
543
+
544
+ __SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width) {
545
+ return (sizeof(long) == sizeof(long long)) ?
546
+ __shfl_sync(mask, (long long) var, srcLane, width) :
547
+ __shfl_sync(mask, (int) var, srcLane, width);
548
+ }
549
+
550
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width) {
551
+ return (sizeof(long) == sizeof(long long)) ?
552
+ __shfl_sync(mask, (unsigned long long) var, srcLane, width) :
553
+ __shfl_sync(mask, (unsigned int) var, srcLane, width);
554
+ }
555
+
556
+ __SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width) {
557
+ return (sizeof(long) == sizeof(long long)) ?
558
+ __shfl_up_sync(mask, (long long) var, delta, width) :
559
+ __shfl_up_sync(mask, (int) var, delta, width);
560
+ }
561
+
562
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width) {
563
+ return (sizeof(long) == sizeof(long long)) ?
564
+ __shfl_up_sync(mask, (unsigned long long) var, delta, width) :
565
+ __shfl_up_sync(mask, (unsigned int) var, delta, width);
566
+ }
567
+
568
+ __SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width) {
569
+ return (sizeof(long) == sizeof(long long)) ?
570
+ __shfl_down_sync(mask, (long long) var, delta, width) :
571
+ __shfl_down_sync(mask, (int) var, delta, width);
572
+ }
573
+
574
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned int delta, int width) {
575
+ return (sizeof(long) == sizeof(long long)) ?
576
+ __shfl_down_sync(mask, (unsigned long long) var, delta, width) :
577
+ __shfl_down_sync(mask, (unsigned int) var, delta, width);
578
+ }
579
+
580
+ __SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width) {
581
+ return (sizeof(long) == sizeof(long long)) ?
582
+ __shfl_xor_sync(mask, (long long) var, laneMask, width) :
583
+ __shfl_xor_sync(mask, (int) var, laneMask, width);
584
+ }
585
+
586
+ __SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width) {
587
+ return (sizeof(long) == sizeof(long long)) ?
588
+ __shfl_xor_sync(mask, (unsigned long long) var, laneMask, width) :
589
+ __shfl_xor_sync(mask, (unsigned int) var, laneMask, width);
590
+ }
591
+
592
+ #if defined(__local_warpSize)
593
+ #undef warpSize
594
+ #undef __local_warpSize
595
+ #endif
596
+
597
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */
598
+
599
+ #endif /* __cplusplus && __CUDACC__ */
600
+
601
+ #undef __SM_30_INTRINSICS_DECL__
602
+
603
+ #endif /* !__SM_30_INTRINSICS_HPP__ */
604
+
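
Throughout the implementations above, the control operand c packs two fields in the layout the PTX shfl instruction expects: bits 0..4 hold the maximum-source-lane clamp and bits 8..12 hold the segment bound derived from width. A small worked example of the encoding (host-side arithmetic only; variable names are illustrative):

#include <assert.h>
int main(void)
{
    const int kWarpSize = 32, width = 16;        /* illustrative width */
    int c = ((kWarpSize - width) << 8) | 0x1f;   /* as in __shfl / __shfl_sync */
    /* 0x10 in bits 8..12 splits the warp into 16-lane segments;
       0x1f in bits 0..4 clamps the source lane index. */
    assert(c == 0x101f);
    int c_up = (kWarpSize - width) << 8;         /* as in __shfl_up: no lane clamp */
    assert(c_up == 0x1000);
    return 0;
}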
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_atomic_functions.hpp ADDED
@@ -0,0 +1,134 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_32_ATOMIC_FUNCTIONS_HPP__)
51
+ #define __SM_32_ATOMIC_FUNCTIONS_HPP__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_32_ATOMIC_FUNCTIONS_DECL__ __device__
55
+ #else /* !__CUDACC_RTC__ */
56
+ #define __SM_32_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ /*******************************************************************************
72
+ * *
73
+ * *
74
+ * *
75
+ *******************************************************************************/
76
+
77
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMin(long long *address, long long val)
78
+ {
79
+ return __illAtomicMin(address, val);
80
+ }
81
+
82
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicMax(long long *address, long long val)
83
+ {
84
+ return __illAtomicMax(address, val);
85
+ }
86
+
87
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicAnd(long long *address, long long val)
88
+ {
89
+ return __llAtomicAnd(address, val);
90
+ }
91
+
92
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicOr(long long *address, long long val)
93
+ {
94
+ return __llAtomicOr(address, val);
95
+ }
96
+
97
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ long long atomicXor(long long *address, long long val)
98
+ {
99
+ return __llAtomicXor(address, val);
100
+ }
101
+
102
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMin(unsigned long long *address, unsigned long long val)
103
+ {
104
+ return __ullAtomicMin(address, val);
105
+ }
106
+
107
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicMax(unsigned long long *address, unsigned long long val)
108
+ {
109
+ return __ullAtomicMax(address, val);
110
+ }
111
+
112
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicAnd(unsigned long long *address, unsigned long long val)
113
+ {
114
+ return __ullAtomicAnd(address, val);
115
+ }
116
+
117
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicOr(unsigned long long *address, unsigned long long val)
118
+ {
119
+ return __ullAtomicOr(address, val);
120
+ }
121
+
122
+ __SM_32_ATOMIC_FUNCTIONS_DECL__ unsigned long long atomicXor(unsigned long long *address, unsigned long long val)
123
+ {
124
+ return __ullAtomicXor(address, val);
125
+ }
126
+
127
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */
128
+
129
+ #endif /* __cplusplus && __CUDACC__ */
130
+
131
+ #undef __SM_32_ATOMIC_FUNCTIONS_DECL__
132
+
133
+ #endif /* !__SM_32_ATOMIC_FUNCTIONS_HPP__ */
134
+
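
The overloads above extend atomicMin/Max/And/Or/Xor to long long and unsigned long long on devices of compute capability 3.2 and higher. A minimal usage sketch (the kernel below is illustrative, not part of the header):

// Illustrative kernel: record the running maximum of all inputs.
// Requires sm_32 or newer for the unsigned long long atomicMax overload.
__global__ void recordMax(const unsigned long long *vals,
                          unsigned long long *maxOut)
{
    unsigned long long v = vals[blockIdx.x * blockDim.x + threadIdx.x];
    atomicMax(maxOut, v);   // atomically keeps the larger of *maxOut and v
}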
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.h ADDED
@@ -0,0 +1,510 @@
1
+ /*
2
+ * Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__SM_32_INTRINSICS_H__)
51
+ #define __SM_32_INTRINSICS_H__
52
+
53
+ #if defined(__CUDACC_RTC__)
54
+ #define __SM_32_INTRINSICS_DECL__ __device__
55
+ #else /* !__CUDACC_RTC__ */
56
+ #define __SM_32_INTRINSICS_DECL__ static __device__ __inline__
57
+ #endif /* __CUDACC_RTC__ */
58
+
59
+ #if defined(__cplusplus) && defined(__CUDACC__)
60
+
61
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
62
+
63
+ /*******************************************************************************
64
+ * *
65
+ * *
66
+ * *
67
+ *******************************************************************************/
68
+
69
+ #include "cuda_runtime_api.h"
70
+
71
+ #ifndef __CUDA_ARCH__
72
+ #define __DEF_IF_HOST { }
73
+ #else /* !__CUDA_ARCH__ */
74
+ #define __DEF_IF_HOST ;
75
+ #endif /* __CUDA_ARCH__ */
76
+
77
+
78
+ /*******************************************************************************
79
+ * *
80
+ * Below are declarations of SM-3.5 intrinsics which are included as *
81
+ * source (instead of being built in to the compiler) *
82
+ * *
83
+ *******************************************************************************/
84
+ /******************************************************************************
85
+ * __ldg *
86
+ ******************************************************************************/
87
+ __SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) __DEF_IF_HOST
88
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) __DEF_IF_HOST
89
+
90
+ __SM_32_INTRINSICS_DECL__ char __ldg(const char *ptr) __DEF_IF_HOST
91
+ __SM_32_INTRINSICS_DECL__ signed char __ldg(const signed char *ptr) __DEF_IF_HOST
92
+ __SM_32_INTRINSICS_DECL__ short __ldg(const short *ptr) __DEF_IF_HOST
93
+ __SM_32_INTRINSICS_DECL__ int __ldg(const int *ptr) __DEF_IF_HOST
94
+ __SM_32_INTRINSICS_DECL__ long long __ldg(const long long *ptr) __DEF_IF_HOST
95
+ __SM_32_INTRINSICS_DECL__ char2 __ldg(const char2 *ptr) __DEF_IF_HOST
96
+ __SM_32_INTRINSICS_DECL__ char4 __ldg(const char4 *ptr) __DEF_IF_HOST
97
+ __SM_32_INTRINSICS_DECL__ short2 __ldg(const short2 *ptr) __DEF_IF_HOST
98
+ __SM_32_INTRINSICS_DECL__ short4 __ldg(const short4 *ptr) __DEF_IF_HOST
99
+ __SM_32_INTRINSICS_DECL__ int2 __ldg(const int2 *ptr) __DEF_IF_HOST
100
+ __SM_32_INTRINSICS_DECL__ int4 __ldg(const int4 *ptr) __DEF_IF_HOST
101
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldg(const longlong2 *ptr) __DEF_IF_HOST
102
+
103
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldg(const unsigned char *ptr) __DEF_IF_HOST
104
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldg(const unsigned short *ptr) __DEF_IF_HOST
105
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldg(const unsigned int *ptr) __DEF_IF_HOST
106
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldg(const unsigned long long *ptr) __DEF_IF_HOST
107
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldg(const uchar2 *ptr) __DEF_IF_HOST
108
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldg(const uchar4 *ptr) __DEF_IF_HOST
109
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldg(const ushort2 *ptr) __DEF_IF_HOST
110
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldg(const ushort4 *ptr) __DEF_IF_HOST
111
+ __SM_32_INTRINSICS_DECL__ uint2 __ldg(const uint2 *ptr) __DEF_IF_HOST
112
+ __SM_32_INTRINSICS_DECL__ uint4 __ldg(const uint4 *ptr) __DEF_IF_HOST
113
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldg(const ulonglong2 *ptr) __DEF_IF_HOST
114
+
115
+ __SM_32_INTRINSICS_DECL__ float __ldg(const float *ptr) __DEF_IF_HOST
116
+ __SM_32_INTRINSICS_DECL__ double __ldg(const double *ptr) __DEF_IF_HOST
117
+ __SM_32_INTRINSICS_DECL__ float2 __ldg(const float2 *ptr) __DEF_IF_HOST
118
+ __SM_32_INTRINSICS_DECL__ float4 __ldg(const float4 *ptr) __DEF_IF_HOST
119
+ __SM_32_INTRINSICS_DECL__ double2 __ldg(const double2 *ptr) __DEF_IF_HOST
120
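
__ldg loads through the read-only data cache and is only safe when the addressed memory is not written for the lifetime of the kernel. A minimal usage sketch (kernel and names illustrative, not part of the header):

// Illustrative kernel: y[i] = a * x[i], reading x via the read-only cache.
__global__ void scale(const float * __restrict__ x, float *y, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        y[i] = a * __ldg(&x[i]);   // cached read-only load of x[i]
}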
+ /******************************************************************************
121
+ * __ldcg *
122
+ ******************************************************************************/
123
+ __SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) __DEF_IF_HOST
124
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) __DEF_IF_HOST
125
+
126
+ __SM_32_INTRINSICS_DECL__ char __ldcg(const char *ptr) __DEF_IF_HOST
127
+ __SM_32_INTRINSICS_DECL__ signed char __ldcg(const signed char *ptr) __DEF_IF_HOST
128
+ __SM_32_INTRINSICS_DECL__ short __ldcg(const short *ptr) __DEF_IF_HOST
129
+ __SM_32_INTRINSICS_DECL__ int __ldcg(const int *ptr) __DEF_IF_HOST
130
+ __SM_32_INTRINSICS_DECL__ long long __ldcg(const long long *ptr) __DEF_IF_HOST
131
+ __SM_32_INTRINSICS_DECL__ char2 __ldcg(const char2 *ptr) __DEF_IF_HOST
132
+ __SM_32_INTRINSICS_DECL__ char4 __ldcg(const char4 *ptr) __DEF_IF_HOST
133
+ __SM_32_INTRINSICS_DECL__ short2 __ldcg(const short2 *ptr) __DEF_IF_HOST
134
+ __SM_32_INTRINSICS_DECL__ short4 __ldcg(const short4 *ptr) __DEF_IF_HOST
135
+ __SM_32_INTRINSICS_DECL__ int2 __ldcg(const int2 *ptr) __DEF_IF_HOST
136
+ __SM_32_INTRINSICS_DECL__ int4 __ldcg(const int4 *ptr) __DEF_IF_HOST
137
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcg(const longlong2 *ptr) __DEF_IF_HOST
138
+
139
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcg(const unsigned char *ptr) __DEF_IF_HOST
140
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcg(const unsigned short *ptr) __DEF_IF_HOST
141
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcg(const unsigned int *ptr) __DEF_IF_HOST
142
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcg(const unsigned long long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcg(const uchar2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcg(const uchar4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcg(const ushort2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcg(const ushort4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcg(const uint2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcg(const uint4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcg(const ulonglong2 *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ float __ldcg(const float *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ double __ldcg(const double *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ float2 __ldcg(const float2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ float4 __ldcg(const float4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ double2 __ldcg(const double2 *ptr) __DEF_IF_HOST
+ /******************************************************************************
+ * __ldca *
+ ******************************************************************************/
+ __SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ char __ldca(const char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ signed char __ldca(const signed char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short __ldca(const short *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int __ldca(const int *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ long long __ldca(const long long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ char2 __ldca(const char2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ char4 __ldca(const char4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short2 __ldca(const short2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short4 __ldca(const short4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int2 __ldca(const int2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int4 __ldca(const int4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldca(const longlong2 *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldca(const unsigned char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldca(const unsigned short *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldca(const unsigned int *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldca(const unsigned long long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldca(const uchar2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldca(const uchar4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldca(const ushort2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldca(const ushort4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uint2 __ldca(const uint2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uint4 __ldca(const uint4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldca(const ulonglong2 *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ float __ldca(const float *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ double __ldca(const double *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ float2 __ldca(const float2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ float4 __ldca(const float4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ double2 __ldca(const double2 *ptr) __DEF_IF_HOST
+ /******************************************************************************
+ * __ldcs *
+ ******************************************************************************/
+ __SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ char __ldcs(const char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ signed char __ldcs(const signed char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short __ldcs(const short *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int __ldcs(const int *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ long long __ldcs(const long long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ char2 __ldcs(const char2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ char4 __ldcs(const char4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short2 __ldcs(const short2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short4 __ldcs(const short4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int2 __ldcs(const int2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int4 __ldcs(const int4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcs(const longlong2 *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcs(const unsigned char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcs(const unsigned short *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcs(const unsigned int *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcs(const unsigned long long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcs(const uchar2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcs(const uchar4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcs(const ushort2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcs(const ushort4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcs(const uint2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcs(const uint4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcs(const ulonglong2 *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ float __ldcs(const float *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ double __ldcs(const double *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ float2 __ldcs(const float2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ float4 __ldcs(const float4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ double2 __ldcs(const double2 *ptr) __DEF_IF_HOST
+ /******************************************************************************
+ * __ldlu *
+ ******************************************************************************/
+ __SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ char __ldlu(const char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ signed char __ldlu(const signed char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short __ldlu(const short *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int __ldlu(const int *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ long long __ldlu(const long long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ char2 __ldlu(const char2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ char4 __ldlu(const char4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short2 __ldlu(const short2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short4 __ldlu(const short4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int2 __ldlu(const int2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int4 __ldlu(const int4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldlu(const longlong2 *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldlu(const unsigned char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldlu(const unsigned short *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldlu(const unsigned int *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldlu(const unsigned long long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldlu(const uchar2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldlu(const uchar4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldlu(const ushort2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldlu(const ushort4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uint2 __ldlu(const uint2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uint4 __ldlu(const uint4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldlu(const ulonglong2 *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ float __ldlu(const float *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ double __ldlu(const double *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ float2 __ldlu(const float2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ float4 __ldlu(const float4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ double2 __ldlu(const double2 *ptr) __DEF_IF_HOST
+ /******************************************************************************
+ * __ldcv *
+ ******************************************************************************/
+ __SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ char __ldcv(const char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ signed char __ldcv(const signed char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short __ldcv(const short *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int __ldcv(const int *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ long long __ldcv(const long long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ char2 __ldcv(const char2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ char4 __ldcv(const char4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short2 __ldcv(const short2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ short4 __ldcv(const short4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int2 __ldcv(const int2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ int4 __ldcv(const int4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcv(const longlong2 *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcv(const unsigned char *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcv(const unsigned short *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcv(const unsigned int *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcv(const unsigned long long *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcv(const uchar2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcv(const uchar4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcv(const ushort2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcv(const ushort4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcv(const uint2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcv(const uint4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcv(const ulonglong2 *ptr) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ float __ldcv(const float *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ double __ldcv(const double *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ float2 __ldcv(const float2 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ float4 __ldcv(const float4 *ptr) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ double2 __ldcv(const double2 *ptr) __DEF_IF_HOST
+ /******************************************************************************
+ * __stwb *
+ ******************************************************************************/
+ __SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stwb(char *ptr, char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(signed char *ptr, signed char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(short *ptr, short value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(int *ptr, int value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(long long *ptr, long long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(char2 *ptr, char2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(char4 *ptr, char4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(short2 *ptr, short2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(short4 *ptr, short4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(int2 *ptr, int2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(int4 *ptr, int4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(longlong2 *ptr, longlong2 value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned char *ptr, unsigned char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned short *ptr, unsigned short value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned int *ptr, unsigned int value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(uchar2 *ptr, uchar2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(uchar4 *ptr, uchar4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(ushort2 *ptr, ushort2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(ushort4 *ptr, ushort4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(uint2 *ptr, uint2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(uint4 *ptr, uint4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stwb(float *ptr, float value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(double *ptr, double value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(float2 *ptr, float2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(float4 *ptr, float4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwb(double2 *ptr, double2 value) __DEF_IF_HOST
+ /******************************************************************************
+ * __stcg *
+ ******************************************************************************/
+ __SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stcg(char *ptr, char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(signed char *ptr, signed char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(short *ptr, short value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(int *ptr, int value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(long long *ptr, long long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(char2 *ptr, char2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(char4 *ptr, char4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(short2 *ptr, short2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(short4 *ptr, short4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(int2 *ptr, int2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(int4 *ptr, int4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(longlong2 *ptr, longlong2 value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned char *ptr, unsigned char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned short *ptr, unsigned short value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned int *ptr, unsigned int value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(uchar2 *ptr, uchar2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(uchar4 *ptr, uchar4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(ushort2 *ptr, ushort2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(ushort4 *ptr, ushort4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(uint2 *ptr, uint2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(uint4 *ptr, uint4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stcg(float *ptr, float value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(double *ptr, double value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(float2 *ptr, float2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(float4 *ptr, float4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcg(double2 *ptr, double2 value) __DEF_IF_HOST
+ /******************************************************************************
+ * __stcs *
+ ******************************************************************************/
+ __SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stcs(char *ptr, char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(signed char *ptr, signed char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(short *ptr, short value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(int *ptr, int value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(long long *ptr, long long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(char2 *ptr, char2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(char4 *ptr, char4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(short2 *ptr, short2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(short4 *ptr, short4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(int2 *ptr, int2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(int4 *ptr, int4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(longlong2 *ptr, longlong2 value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned char *ptr, unsigned char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned short *ptr, unsigned short value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned int *ptr, unsigned int value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(uchar2 *ptr, uchar2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(uchar4 *ptr, uchar4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(ushort2 *ptr, ushort2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(ushort4 *ptr, ushort4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(uint2 *ptr, uint2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(uint4 *ptr, uint4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stcs(float *ptr, float value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(double *ptr, double value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(float2 *ptr, float2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(float4 *ptr, float4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stcs(double2 *ptr, double2 value) __DEF_IF_HOST
+ /******************************************************************************
+ * __stwt *
+ ******************************************************************************/
+ __SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stwt(char *ptr, char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(signed char *ptr, signed char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(short *ptr, short value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(int *ptr, int value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(long long *ptr, long long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(char2 *ptr, char2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(char4 *ptr, char4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(short2 *ptr, short2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(short4 *ptr, short4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(int2 *ptr, int2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(int4 *ptr, int4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(longlong2 *ptr, longlong2 value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned char *ptr, unsigned char value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned short *ptr, unsigned short value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned int *ptr, unsigned int value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long long *ptr, unsigned long long value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(uchar2 *ptr, uchar2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(uchar4 *ptr, uchar4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(ushort2 *ptr, ushort2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(ushort4 *ptr, ushort4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(uint2 *ptr, uint2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(uint4 *ptr, uint4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(ulonglong2 *ptr, ulonglong2 value) __DEF_IF_HOST
+
+ __SM_32_INTRINSICS_DECL__ void __stwt(float *ptr, float value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(double *ptr, double value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(float2 *ptr, float2 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(float4 *ptr, float4 value) __DEF_IF_HOST
+ __SM_32_INTRINSICS_DECL__ void __stwt(double2 *ptr, double2 value) __DEF_IF_HOST
+
+
+ // SHF is the "funnel shift" operation - an accelerated left/right shift with carry
+ // operating on 64-bit quantities, which are concatenations of two 32-bit registers.
+
+ /**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Concatenate \p hi : \p lo, shift left by \p shift & 31 bits, return the most significant 32 bits.
+ *
+ * Shift the 64-bit value formed by concatenating arguments \p lo and \p hi left by the amount specified by the argument \p shift.
+ * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value.
+ * The source is shifted left by the wrapped value of \p shift (\p shift & 31).
+ * The most significant 32 bits of the result are returned.
+ *
+ * \return Returns the most significant 32 bits of the shifted 64-bit value.
+ */
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_l(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST
+ /**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Concatenate \p hi : \p lo, shift left by min(\p shift, 32) bits, return the most significant 32 bits.
+ *
+ * Shift the 64-bit value formed by concatenating arguments \p lo and \p hi left by the amount specified by the argument \p shift.
+ * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value.
+ * The source is shifted left by the clamped value of \p shift (min(\p shift, 32)).
+ * The most significant 32 bits of the result are returned.
+ *
+ * \return Returns the most significant 32 bits of the shifted 64-bit value.
+ */
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_lc(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST
+
+ /**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Concatenate \p hi : \p lo, shift right by \p shift & 31 bits, return the least significant 32 bits.
+ *
+ * Shift the 64-bit value formed by concatenating arguments \p lo and \p hi right by the amount specified by the argument \p shift.
+ * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value.
+ * The source is shifted right by the wrapped value of \p shift (\p shift & 31).
+ * The least significant 32 bits of the result are returned.
+ *
+ * \return Returns the least significant 32 bits of the shifted 64-bit value.
+ */
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_r(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST
+ /**
+ * \ingroup CUDA_MATH_INTRINSIC_INT
+ * \brief Concatenate \p hi : \p lo, shift right by min(\p shift, 32) bits, return the least significant 32 bits.
+ *
+ * Shift the 64-bit value formed by concatenating arguments \p lo and \p hi right by the amount specified by the argument \p shift.
+ * Argument \p lo holds bits 31:0 and argument \p hi holds bits 63:32 of the 64-bit source value.
+ * The source is shifted right by the clamped value of \p shift (min(\p shift, 32)).
+ * The least significant 32 bits of the result are returned.
+ *
+ * \return Returns the least significant 32 bits of the shifted 64-bit value.
+ */
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_rc(unsigned int lo, unsigned int hi, unsigned int shift) __DEF_IF_HOST
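+
+ // Example: a 64-bit left-rotate built from the funnel shifts above. This is a
+ // minimal editorial sketch, not part of the original header; "example_rotl64"
+ // is a hypothetical helper name. For shift amounts below 32 the wrapped form
+ // already suffices (shift & 31 == shift); for 32 <= shift < 64 the two 32-bit
+ // halves simply swap roles, which the branch below selects.
+ static __device__ __inline__ unsigned long long example_rotl64(unsigned long long v, unsigned int shift)
+ {
+     unsigned int lo = (unsigned int)(v & 0xFFFFFFFFULL);
+     unsigned int hi = (unsigned int)(v >> 32);
+     unsigned int nhi, nlo;
+     if ((shift & 32) == 0) {   // rotate by 0..31
+         nhi = __funnelshift_l(lo, hi, shift);
+         nlo = __funnelshift_l(hi, lo, shift);
+     } else {                   // rotate by 32..63: the halves exchange
+         nhi = __funnelshift_l(hi, lo, shift);
+         nlo = __funnelshift_l(lo, hi, shift);
+     }
+     return ((unsigned long long)nhi << 32) | nlo;
+ }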
+
+
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */
+
+ #endif /* __cplusplus && __CUDACC__ */
+
+ #undef __SM_32_INTRINSICS_DECL__
+
+ #if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
+ #include "sm_32_intrinsics.hpp"
+ #endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
+
+ #endif /* !__SM_32_INTRINSICS_H__ */
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp ADDED
@@ -0,0 +1,588 @@
+ /*
+ * Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__SM_32_INTRINSICS_HPP__)
+ #define __SM_32_INTRINSICS_HPP__
+
+ #if defined(__CUDACC_RTC__)
+ #define __SM_32_INTRINSICS_DECL__ __device__
+ #else /* !__CUDACC_RTC__ */
+ #define __SM_32_INTRINSICS_DECL__ static __device__ __inline__
+ #endif /* __CUDACC_RTC__ */
+
+ #if defined(__cplusplus) && defined(__CUDACC__)
+
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
+
+ /*******************************************************************************
+ * *
+ * *
+ * *
+ *******************************************************************************/
+
+ #include "cuda_runtime_api.h"
+
+ // In here are intrinsics which are built in to the compiler. These may be
+ // referenced by intrinsic implementations from this file.
+ extern "C"
+ {
+ // There are no intrinsics built in to the compiler for SM-3.2;
+ // all intrinsics are now implemented as inline PTX below.
+ }
+
+ /*******************************************************************************
+ * *
+ * Below are implementations of SM-3.2 intrinsics which are included as *
+ * source (instead of being built in to the compiler) *
+ * *
+ *******************************************************************************/
+
+ // LDG is a "load from global via texture path" command which can exhibit higher
+ // bandwidth on GK110 than a regular LD.
+ // Define a different pointer storage size for 64 and 32 bit
+ #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__)
+ #define __LDG_PTR "l"
+ #else
+ #define __LDG_PTR "r"
+ #endif
+
+ /******************************************************************************
+ * __ldg *
+ ******************************************************************************/
+
+ // Size of long is architecture and OS specific.
+ #if defined(__LP64__) // 64 bits
+ __SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
+ #else // 32 bits
+ __SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
+ #endif
+
+
+ __SM_32_INTRINSICS_DECL__ char __ldg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
+ __SM_32_INTRINSICS_DECL__ signed char __ldg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
+ __SM_32_INTRINSICS_DECL__ short __ldg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
+ __SM_32_INTRINSICS_DECL__ int __ldg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
+ __SM_32_INTRINSICS_DECL__ long long __ldg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
+ __SM_32_INTRINSICS_DECL__ char2 __ldg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.nc.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ char4 __ldg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.nc.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ short2 __ldg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.nc.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ short4 __ldg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.nc.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ int2 __ldg(const int2 *ptr) { int2 ret; asm volatile ("ld.global.nc.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ int4 __ldg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.nc.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldg(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.nc.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.nc.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.nc.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.nc.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.nc.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint2 __ldg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.nc.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint4 __ldg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ float __ldg(const float *ptr) { float ret; asm volatile ("ld.global.nc.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ double __ldg(const double *ptr) { double ret; asm volatile ("ld.global.nc.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ float2 __ldg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.nc.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ float4 __ldg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.nc.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ double2 __ldg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.nc.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
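+
+ // Example usage: a minimal SAXPY kernel (an editorial sketch, not part of the
+ // original header; "example_saxpy_ldg" is a hypothetical name). __ldg is only
+ // safe when no thread can write *ptr while the kernel runs, since the
+ // read-only/texture cache is not kept coherent with global stores.
+ static __global__ void example_saxpy_ldg(float a, const float *__restrict__ x, float *y, int n)
+ {
+     int i = blockIdx.x * blockDim.x + threadIdx.x;
+     if (i < n) {
+         y[i] = a * __ldg(&x[i]) + y[i];   // the load of x[i] becomes ld.global.nc.f32
+     }
+ }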
+
+
+ /******************************************************************************
+ * __ldcg *
+ ******************************************************************************/
+
+ // Size of long is architecture and OS specific.
+ #if defined(__LP64__) // 64 bits
+ __SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
+ #else // 32 bits
+ __SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
+ #endif
+
+
+ __SM_32_INTRINSICS_DECL__ char __ldcg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
+ __SM_32_INTRINSICS_DECL__ signed char __ldcg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
+ __SM_32_INTRINSICS_DECL__ short __ldcg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
+ __SM_32_INTRINSICS_DECL__ int __ldcg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
+ __SM_32_INTRINSICS_DECL__ long long __ldcg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
+ __SM_32_INTRINSICS_DECL__ char2 __ldcg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cg.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ char4 __ldcg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cg.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ short2 __ldcg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cg.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ short4 __ldcg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cg.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ int2 __ldcg(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cg.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ int4 __ldcg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cg.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcg(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.cg.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cg.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cg.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cg.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cg.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cg.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cg.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cg.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ float __ldcg(const float *ptr) { float ret; asm volatile ("ld.global.cg.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ double __ldcg(const double *ptr) { double ret; asm volatile ("ld.global.cg.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ float2 __ldcg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cg.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ float4 __ldcg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cg.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ double2 __ldcg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cg.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
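+
+ // Example usage: polling a flag published by another block (an editorial sketch,
+ // not part of the original header; "example_wait_for_flag" is a hypothetical
+ // name). The .cg operator caches at L2 and skips the per-SM L1, and because the
+ // inline asm above is volatile, the load is re-issued on every iteration rather
+ // than being hoisted out of the loop.
+ static __device__ __inline__ void example_wait_for_flag(const int *flag)
+ {
+     while (__ldcg(flag) == 0) {   // ld.global.cg.s32: re-read from L2 each pass
+         /* spin */
+     }
+ }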
+
+ /******************************************************************************
+ * __ldca *
+ ******************************************************************************/
+
+ // Size of long is architecture and OS specific.
+ #if defined(__LP64__) // 64 bits
+ __SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
+ #else // 32 bits
+ __SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
+ #endif
+
+
+ __SM_32_INTRINSICS_DECL__ char __ldca(const char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
+ __SM_32_INTRINSICS_DECL__ signed char __ldca(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
+ __SM_32_INTRINSICS_DECL__ short __ldca(const short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
+ __SM_32_INTRINSICS_DECL__ int __ldca(const int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
+ __SM_32_INTRINSICS_DECL__ long long __ldca(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
+ __SM_32_INTRINSICS_DECL__ char2 __ldca(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.ca.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ char4 __ldca(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.ca.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ short2 __ldca(const short2 *ptr) { short2 ret; asm volatile ("ld.global.ca.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ short4 __ldca(const short4 *ptr) { short4 ret; asm volatile ("ld.global.ca.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ int2 __ldca(const int2 *ptr) { int2 ret; asm volatile ("ld.global.ca.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ int4 __ldca(const int4 *ptr) { int4 ret; asm volatile ("ld.global.ca.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldca(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.ca.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldca(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldca(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldca(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldca(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldca(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.ca.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldca(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.ca.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldca(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.ca.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldca(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.ca.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint2 __ldca(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.ca.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint4 __ldca(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.ca.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldca(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.ca.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ float __ldca(const float *ptr) { float ret; asm volatile ("ld.global.ca.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ double __ldca(const double *ptr) { double ret; asm volatile ("ld.global.ca.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ float2 __ldca(const float2 *ptr) { float2 ret; asm volatile ("ld.global.ca.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ float4 __ldca(const float4 *ptr) { float4 ret; asm volatile ("ld.global.ca.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ double2 __ldca(const double2 *ptr) { double2 ret; asm volatile ("ld.global.ca.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
+
+ /******************************************************************************
+ * __ldcs *
+ ******************************************************************************/
+
+ // Size of long is architecture and OS specific.
+ #if defined(__LP64__) // 64 bits
+ __SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
+ #else // 32 bits
+ __SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
+ #endif
+
+
+ __SM_32_INTRINSICS_DECL__ char __ldcs(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; }
+ __SM_32_INTRINSICS_DECL__ signed char __ldcs(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; }
+ __SM_32_INTRINSICS_DECL__ short __ldcs(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; }
+ __SM_32_INTRINSICS_DECL__ int __ldcs(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; }
+ __SM_32_INTRINSICS_DECL__ long long __ldcs(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; }
+ __SM_32_INTRINSICS_DECL__ char2 __ldcs(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cs.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ char4 __ldcs(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cs.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ short2 __ldcs(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cs.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ short4 __ldcs(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cs.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ int2 __ldcs(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cs.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ int4 __ldcs(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cs.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcs(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.cs.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcs(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcs(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcs(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcs(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcs(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cs.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcs(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cs.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcs(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cs.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcs(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cs.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcs(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cs.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcs(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cs.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcs(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cs.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ float __ldcs(const float *ptr) { float ret; asm volatile ("ld.global.cs.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ double __ldcs(const double *ptr) { double ret; asm volatile ("ld.global.cs.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ float2 __ldcs(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cs.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ float4 __ldcs(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cs.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; }
+ __SM_32_INTRINSICS_DECL__ double2 __ldcs(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cs.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; }
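+
+ // Example usage: a touch-once vector copy (an editorial sketch, not part of the
+ // original header; "example_stream_copy" is a hypothetical name). The .cs
+ // "streaming" operator marks fetched lines as evict-first so a single-pass copy
+ // does not pollute the cache; the matching __stcs store is declared in
+ // sm_32_intrinsics.h.
+ static __global__ void example_stream_copy(float4 *dst, const float4 *src, int n)
+ {
+     int i = blockIdx.x * blockDim.x + threadIdx.x;
+     if (i < n) {
+         __stcs(&dst[i], __ldcs(&src[i]));   // ld.global.cs.v4.f32 + st.global.cs.v4.f32
+     }
+ }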
275
+
+ /******************************************************************************
+ * __ldlu *
+ ******************************************************************************/
+
+ // Size of long is architecture and OS specific.
+ #if defined(__LP64__) // 64 bits
+ __SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ #else // 32 bits
+ __SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ #endif
+
+
+ __SM_32_INTRINSICS_DECL__ char __ldlu(const char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; }
+ __SM_32_INTRINSICS_DECL__ signed char __ldlu(const signed char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; }
+ __SM_32_INTRINSICS_DECL__ short __ldlu(const short *ptr) { unsigned short ret; asm ("ld.global.lu.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; }
+ __SM_32_INTRINSICS_DECL__ int __ldlu(const int *ptr) { unsigned int ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; }
+ __SM_32_INTRINSICS_DECL__ long long __ldlu(const long long *ptr) { unsigned long long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; }
+ __SM_32_INTRINSICS_DECL__ char2 __ldlu(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.lu.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ char4 __ldlu(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.lu.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ short2 __ldlu(const short2 *ptr) { short2 ret; asm ("ld.global.lu.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ short4 __ldlu(const short4 *ptr) { short4 ret; asm ("ld.global.lu.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ int2 __ldlu(const int2 *ptr) { int2 ret; asm ("ld.global.lu.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ int4 __ldlu(const int4 *ptr) { int4 ret; asm ("ld.global.lu.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldlu(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.lu.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldlu(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.lu.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldlu(const unsigned short *ptr) { unsigned short ret; asm ("ld.global.lu.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldlu(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldlu(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldlu(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.lu.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldlu(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.lu.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldlu(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.lu.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldlu(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.lu.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint2 __ldlu(const uint2 *ptr) { uint2 ret; asm ("ld.global.lu.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint4 __ldlu(const uint4 *ptr) { uint4 ret; asm ("ld.global.lu.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldlu(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.lu.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ float __ldlu(const float *ptr) { float ret; asm ("ld.global.lu.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ double __ldlu(const double *ptr) { double ret; asm ("ld.global.lu.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ float2 __ldlu(const float2 *ptr) { float2 ret; asm ("ld.global.lu.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ float4 __ldlu(const float4 *ptr) { float4 ret; asm ("ld.global.lu.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ double2 __ldlu(const double2 *ptr) { double2 ret; asm ("ld.global.lu.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+
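// Editorial sketch, not part of the header: __ldlu is the "last use" load; it
// hints that this thread will not read the line again, so the cache may drop
// it immediately. A hypothetical grid-stride reduction where every element is
// consumed exactly once:
__global__ void sum_once(const float *in, float *partial, int n)
{
    float s = 0.0f;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x)
        s += __ldlu(&in[i]);                  // last-use load: line evictable afterwards
    partial[blockIdx.x * blockDim.x + threadIdx.x] = s;
}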
+ /******************************************************************************
+ * __ldcv *
+ ******************************************************************************/
+
+ // Size of long is architecture and OS specific.
+ #if defined(__LP64__) // 64 bits
+ __SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ #else // 32 bits
+ __SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ #endif
+
+
+ __SM_32_INTRINSICS_DECL__ char __ldcv(const char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; }
+ __SM_32_INTRINSICS_DECL__ signed char __ldcv(const signed char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; }
+ __SM_32_INTRINSICS_DECL__ short __ldcv(const short *ptr) { unsigned short ret; asm ("ld.global.cv.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; }
+ __SM_32_INTRINSICS_DECL__ int __ldcv(const int *ptr) { unsigned int ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; }
+ __SM_32_INTRINSICS_DECL__ long long __ldcv(const long long *ptr) { unsigned long long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; }
+ __SM_32_INTRINSICS_DECL__ char2 __ldcv(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.cv.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ char4 __ldcv(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.cv.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ short2 __ldcv(const short2 *ptr) { short2 ret; asm ("ld.global.cv.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ short4 __ldcv(const short4 *ptr) { short4 ret; asm ("ld.global.cv.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ int2 __ldcv(const int2 *ptr) { int2 ret; asm ("ld.global.cv.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ int4 __ldcv(const int4 *ptr) { int4 ret; asm ("ld.global.cv.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ longlong2 __ldcv(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.cv.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ unsigned char __ldcv(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.cv.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned short __ldcv(const unsigned short *ptr) { unsigned short ret; asm ("ld.global.cv.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned int __ldcv(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ unsigned long long __ldcv(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar2 __ldcv(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.cv.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; }
+ __SM_32_INTRINSICS_DECL__ uchar4 __ldcv(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.cv.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort2 __ldcv(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.cv.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ ushort4 __ldcv(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.cv.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint2 __ldcv(const uint2 *ptr) { uint2 ret; asm ("ld.global.cv.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ uint4 __ldcv(const uint4 *ptr) { uint4 ret; asm ("ld.global.cv.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ ulonglong2 __ldcv(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.cv.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+
+ __SM_32_INTRINSICS_DECL__ float __ldcv(const float *ptr) { float ret; asm ("ld.global.cv.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ double __ldcv(const double *ptr) { double ret; asm ("ld.global.cv.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ float2 __ldcv(const float2 *ptr) { float2 ret; asm ("ld.global.cv.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ float4 __ldcv(const float4 *ptr) { float4 ret; asm ("ld.global.cv.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; }
+ __SM_32_INTRINSICS_DECL__ double2 __ldcv(const double2 *ptr) { double2 ret; asm ("ld.global.cv.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; }
+
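// Editorial sketch, not part of the header: __ldcv loads with a "volatile"
// policy, refetching from memory instead of reusing a possibly stale cached
// value. A hypothetical use is polling a flag that another block or the host
// updates (the caller must guarantee the store eventually happens):
__device__ void spin_until_set(const unsigned int *flag)
{
    while (__ldcv(flag) == 0u)                // every iteration issues a fresh load
        ;
}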
+ /******************************************************************************
+ * __stwb *
+ ******************************************************************************/
+
+ // Size of long is architecture and OS specific.
+ #if defined(__LP64__) // 64 bits
+ __SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ #else // 32 bits
+ __SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ #endif
+
+
+ __SM_32_INTRINSICS_DECL__ void __stwb(char *ptr, char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(signed char *ptr, signed char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(short *ptr, short value) { asm ("st.global.wb.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(int *ptr, int value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(long long *ptr, long long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wb.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(short2 *ptr, short2 value) { asm ("st.global.wb.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(short4 *ptr, short4 value) { asm ("st.global.wb.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(int2 *ptr, int2 value) { asm ("st.global.wb.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(int4 *ptr, int4 value) { asm ("st.global.wb.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(longlong2 *ptr, longlong2 value) { asm ("st.global.wb.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
+
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned char *ptr, unsigned char value) { asm ("st.global.wb.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned short *ptr, unsigned short value) { asm ("st.global.wb.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned int *ptr, unsigned int value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.wb.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(ushort2 *ptr, ushort2 value) { asm ("st.global.wb.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(ushort4 *ptr, ushort4 value) { asm ("st.global.wb.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(uint2 *ptr, uint2 value) { asm ("st.global.wb.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(uint4 *ptr, uint4 value) { asm ("st.global.wb.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wb.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
+
+ __SM_32_INTRINSICS_DECL__ void __stwb(float *ptr, float value) { asm ("st.global.wb.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(double *ptr, double value) { asm ("st.global.wb.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(float2 *ptr, float2 value) { asm ("st.global.wb.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(float4 *ptr, float4 value) { asm ("st.global.wb.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwb(double2 *ptr, double2 value) { asm ("st.global.wb.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
+
+ /******************************************************************************
+ * __stcg *
+ ******************************************************************************/
+
+ // Size of long is architecture and OS specific.
+ #if defined(__LP64__) // 64 bits
+ __SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ #else // 32 bits
+ __SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ #endif
+
+
+ __SM_32_INTRINSICS_DECL__ void __stcg(char *ptr, char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(signed char *ptr, signed char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(short *ptr, short value) { asm ("st.global.cg.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(int *ptr, int value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(long long *ptr, long long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cg.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(short2 *ptr, short2 value) { asm ("st.global.cg.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(short4 *ptr, short4 value) { asm ("st.global.cg.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(int2 *ptr, int2 value) { asm ("st.global.cg.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(int4 *ptr, int4 value) { asm ("st.global.cg.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(longlong2 *ptr, longlong2 value) { asm ("st.global.cg.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
+
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned char *ptr, unsigned char value) { asm ("st.global.cg.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned short *ptr, unsigned short value) { asm ("st.global.cg.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned int *ptr, unsigned int value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.cg.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(ushort2 *ptr, ushort2 value) { asm ("st.global.cg.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(ushort4 *ptr, ushort4 value) { asm ("st.global.cg.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(uint2 *ptr, uint2 value) { asm ("st.global.cg.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(uint4 *ptr, uint4 value) { asm ("st.global.cg.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cg.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
+
+ __SM_32_INTRINSICS_DECL__ void __stcg(float *ptr, float value) { asm ("st.global.cg.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(double *ptr, double value) { asm ("st.global.cg.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(float2 *ptr, float2 value) { asm ("st.global.cg.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(float4 *ptr, float4 value) { asm ("st.global.cg.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcg(double2 *ptr, double2 value) { asm ("st.global.cg.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
+
+ /******************************************************************************
+ * __stcs *
+ ******************************************************************************/
+
+ // Size of long is architecture and OS specific.
+ #if defined(__LP64__) // 64 bits
+ __SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ #else // 32 bits
+ __SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ #endif
+
+
+ __SM_32_INTRINSICS_DECL__ void __stcs(char *ptr, char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(signed char *ptr, signed char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(short *ptr, short value) { asm ("st.global.cs.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(int *ptr, int value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(long long *ptr, long long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cs.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(short2 *ptr, short2 value) { asm ("st.global.cs.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(short4 *ptr, short4 value) { asm ("st.global.cs.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(int2 *ptr, int2 value) { asm ("st.global.cs.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(int4 *ptr, int4 value) { asm ("st.global.cs.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(longlong2 *ptr, longlong2 value) { asm ("st.global.cs.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
+
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned char *ptr, unsigned char value) { asm ("st.global.cs.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned short *ptr, unsigned short value) { asm ("st.global.cs.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned int *ptr, unsigned int value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.cs.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(ushort2 *ptr, ushort2 value) { asm ("st.global.cs.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(ushort4 *ptr, ushort4 value) { asm ("st.global.cs.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(uint2 *ptr, uint2 value) { asm ("st.global.cs.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(uint4 *ptr, uint4 value) { asm ("st.global.cs.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cs.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
+
+ __SM_32_INTRINSICS_DECL__ void __stcs(float *ptr, float value) { asm ("st.global.cs.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(double *ptr, double value) { asm ("st.global.cs.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(float2 *ptr, float2 value) { asm ("st.global.cs.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(float4 *ptr, float4 value) { asm ("st.global.cs.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stcs(double2 *ptr, double2 value) { asm ("st.global.cs.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
+
+ /******************************************************************************
+ * __stwt *
+ ******************************************************************************/
+
+ // Size of long is architecture and OS specific.
+ #if defined(__LP64__) // 64 bits
+ __SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ #else // 32 bits
+ __SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ #endif
+
+
+ __SM_32_INTRINSICS_DECL__ void __stwt(char *ptr, char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(signed char *ptr, signed char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(short *ptr, short value) { asm ("st.global.wt.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(int *ptr, int value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(long long *ptr, long long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wt.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(short2 *ptr, short2 value) { asm ("st.global.wt.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(short4 *ptr, short4 value) { asm ("st.global.wt.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(int2 *ptr, int2 value) { asm ("st.global.wt.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(int4 *ptr, int4 value) { asm ("st.global.wt.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(longlong2 *ptr, longlong2 value) { asm ("st.global.wt.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
+
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned char *ptr, unsigned char value) { asm ("st.global.wt.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned short *ptr, unsigned short value) { asm ("st.global.wt.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned int *ptr, unsigned int value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(uchar2 *ptr, uchar2 value) { const int x = value.x, y = value.y; asm ("st.global.wt.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(ushort2 *ptr, ushort2 value) { asm ("st.global.wt.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(ushort4 *ptr, ushort4 value) { asm ("st.global.wt.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(uint2 *ptr, uint2 value) { asm ("st.global.wt.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(uint4 *ptr, uint4 value) { asm ("st.global.wt.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wt.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); }
+
+ __SM_32_INTRINSICS_DECL__ void __stwt(float *ptr, float value) { asm ("st.global.wt.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(double *ptr, double value) { asm ("st.global.wt.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(float2 *ptr, float2 value) { asm ("st.global.wt.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(float4 *ptr, float4 value) { asm ("st.global.wt.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); }
+ __SM_32_INTRINSICS_DECL__ void __stwt(double2 *ptr, double2 value) { asm ("st.global.wt.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); }
+
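// Editorial sketch, not part of the header: the four store flavors above
// differ only in cache policy (wb = write-back default, cg = cache in L2 only,
// cs = streaming/evict-first, wt = write-through to system memory). Pairing a
// streaming load with a streaming store keeps a one-pass copy from evicting
// useful lines. The kernel below is hypothetical:
__global__ void stream_copy(const double2 *src, double2 *dst, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        __stcs(&dst[i], __ldcs(&src[i]));     // read once in, write once out
}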
+ #undef __LDG_PTR
+
+
+ // SHF is the "funnel shift" operation - an accelerated left/right shift with carry
+ // operating on 64-bit quantities, which are concatenations of two 32-bit registers.
+
+ // This shifts [hi:lo] left by "shift" bits, returning the most significant 32 bits of the result.
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_l(unsigned int lo, unsigned int hi, unsigned int shift)
+ {
+     unsigned int ret;
+     asm volatile ("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
+     return ret;
+ }
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_lc(unsigned int lo, unsigned int hi, unsigned int shift)
+ {
+     unsigned int ret;
+     asm volatile ("shf.l.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
+     return ret;
+ }
+
+ // This shifts [hi:lo] right by "shift" bits, returning the least significant 32 bits of the result.
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_r(unsigned int lo, unsigned int hi, unsigned int shift)
+ {
+     unsigned int ret;
+     asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
+     return ret;
+ }
+ __SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_rc(unsigned int lo, unsigned int hi, unsigned int shift)
+ {
+     unsigned int ret;
+     asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift));
+     return ret;
+ }
+
+
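// Editorial sketch, not part of the header: feeding the same word to both
// halves of a funnel shift is the standard rotate idiom, since the bits
// shifted out of one register wrap back in from the other. Helper name is
// hypothetical.
__device__ unsigned int rotl32(unsigned int x, unsigned int s)
{
    // (x:x) << s; the upper 32 bits of the 64-bit result equal rotate-left by s & 31
    return __funnelshift_l(x, x, s);
}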
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */
+
+ #endif /* __cplusplus && __CUDACC__ */
+
+ #undef __SM_32_INTRINSICS_DECL__
+
+ #endif /* !__SM_32_INTRINSICS_HPP__ */
+
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_35_atomic_functions.h ADDED
@@ -0,0 +1,58 @@
+ /*
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__SM_35_ATOMIC_FUNCTIONS_H__)
+ #define __SM_35_ATOMIC_FUNCTIONS_H__
+
+ /*******************************************************************************
+ * All sm_35 atomics are supported by sm_32 so simply include its header file *
+ *******************************************************************************/
+ #include "sm_32_atomic_functions.h"
+
+ #endif /* !__SM_35_ATOMIC_FUNCTIONS_H__ */
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h ADDED
@@ -0,0 +1,539 @@
+ /*
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__SM_60_ATOMIC_FUNCTIONS_H__)
+ #define __SM_60_ATOMIC_FUNCTIONS_H__
+
+
+ #if defined(__CUDACC_RTC__)
+ #define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__
+ #else /* __CUDACC_RTC__ */
+ #define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
+ #endif /* __CUDACC_RTC__ */
+
+ #if defined(__cplusplus) && defined(__CUDACC__)
+
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
+
+ /*******************************************************************************
+ * *
+ * *
+ * *
+ *******************************************************************************/
+
+ #include "cuda_runtime_api.h"
+
+ #ifndef __CUDA_ARCH__
+ #define __DEF_IF_HOST { }
+ #else /* !__CUDA_ARCH__ */
+ #define __DEF_IF_HOST ;
+ #endif /* __CUDA_ARCH__ */
+
+
+
+ #ifdef __CUDA_ARCH__
+ extern "C"
+ {
+ extern __device__ __device_builtin__ double __dAtomicAdd(double *address, double val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicAdd_block(int *address, int val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicAdd_system(int *address, int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicAdd_block(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicAdd_system(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicAdd_block(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicAdd_system(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ float __fAtomicAdd_block(float *address, float val);
+
+ extern __device__ __device_builtin__
+ float __fAtomicAdd_system(float *address, float val);
+
+ extern __device__ __device_builtin__
+ double __dAtomicAdd_block(double *address, double val);
+
+ extern __device__ __device_builtin__
+ double __dAtomicAdd_system(double *address, double val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicExch_block(int *address, int val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicExch_system(int *address, int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicExch_block(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicExch_system(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicExch_block(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicExch_system(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ float __fAtomicExch_block(float *address, float val);
+
+ extern __device__ __device_builtin__
+ float __fAtomicExch_system(float *address, float val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicMin_block(int *address, int val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicMin_system(int *address, int val);
+
+ extern __device__ __device_builtin__
+ long long __illAtomicMin_block(long long *address, long long val);
+
+ extern __device__ __device_builtin__
+ long long __illAtomicMin_system(long long *address, long long val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicMin_block(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicMin_system(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicMin_block(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicMin_system(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicMax_block(int *address, int val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicMax_system(int *address, int val);
+
+ extern __device__ __device_builtin__
+ long long __illAtomicMax_block(long long *address, long long val);
+
+ extern __device__ __device_builtin__
+ long long __illAtomicMax_system(long long *address, long long val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicMax_block(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicMax_system(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicMax_block(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicMax_system(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicInc_block(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicInc_system(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicDec_block(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicDec_system(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicCAS_block(int *address, int compare, int val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicCAS_system(int *address, int compare, int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicCAS_block(unsigned int *address, unsigned int compare,
+ unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicCAS_system(unsigned int *address, unsigned int compare,
+ unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicCAS_block(unsigned long long int *address,
+ unsigned long long int compare,
+ unsigned long long int val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicCAS_system(unsigned long long int *address,
+ unsigned long long int compare,
+ unsigned long long int val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicAnd_block(int *address, int val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicAnd_system(int *address, int val);
+
+ extern __device__ __device_builtin__
+ long long __llAtomicAnd_block(long long *address, long long val);
+
+ extern __device__ __device_builtin__
+ long long __llAtomicAnd_system(long long *address, long long val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicAnd_block(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicAnd_system(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicAnd_block(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicAnd_system(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicOr_block(int *address, int val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicOr_system(int *address, int val);
+
+ extern __device__ __device_builtin__
+ long long __llAtomicOr_block(long long *address, long long val);
+
+ extern __device__ __device_builtin__
+ long long __llAtomicOr_system(long long *address, long long val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicOr_block(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicOr_system(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicOr_block(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicOr_system(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicXor_block(int *address, int val);
+
+ extern __device__ __device_builtin__
+ int __iAtomicXor_system(int *address, int val);
+
+ extern __device__ __device_builtin__
+ long long __llAtomicXor_block(long long *address, long long val);
+
+ extern __device__ __device_builtin__
+ long long __llAtomicXor_system(long long *address, long long val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicXor_block(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned int __uAtomicXor_system(unsigned int *address, unsigned int val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicXor_block(unsigned long long *address, unsigned long long val);
+
+ extern __device__ __device_builtin__
+ unsigned long long __ullAtomicXor_system(unsigned long long *address, unsigned long long val);
+ }
+ #endif /* __CUDA_ARCH__ */
+
297
+ /*******************************************************************************
298
+ * *
299
+ * *
300
+ * *
301
+ *******************************************************************************/
302
+
303
+ __SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val) __DEF_IF_HOST
304
+
305
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
306
+ int atomicAdd_block(int *address, int val) __DEF_IF_HOST
307
+
308
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
309
+ int atomicAdd_system(int *address, int val) __DEF_IF_HOST
310
+
311
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
312
+ unsigned int atomicAdd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
313
+
314
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
315
+ unsigned int atomicAdd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
316
+
317
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
318
+ unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
319
+
320
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
321
+ unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
322
+
323
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
324
+ float atomicAdd_block(float *address, float val) __DEF_IF_HOST
325
+
326
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
327
+ float atomicAdd_system(float *address, float val) __DEF_IF_HOST
328
+
329
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
330
+ double atomicAdd_block(double *address, double val) __DEF_IF_HOST
331
+
332
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
333
+ double atomicAdd_system(double *address, double val) __DEF_IF_HOST
334
+
335
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
336
+ int atomicSub_block(int *address, int val) __DEF_IF_HOST
337
+
338
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
339
+ int atomicSub_system(int *address, int val) __DEF_IF_HOST
340
+
341
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
342
+ unsigned int atomicSub_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
343
+
344
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
345
+ unsigned int atomicSub_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
346
+
347
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
348
+ int atomicExch_block(int *address, int val) __DEF_IF_HOST
349
+
350
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
351
+ int atomicExch_system(int *address, int val) __DEF_IF_HOST
352
+
353
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
354
+ unsigned int atomicExch_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
355
+
356
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
357
+ unsigned int atomicExch_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
358
+
359
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
360
+ unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
361
+
362
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
363
+ unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
364
+
365
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
366
+ float atomicExch_block(float *address, float val) __DEF_IF_HOST
367
+
368
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
369
+ float atomicExch_system(float *address, float val) __DEF_IF_HOST
370
+
371
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
372
+ int atomicMin_block(int *address, int val) __DEF_IF_HOST
373
+
374
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
375
+ int atomicMin_system(int *address, int val) __DEF_IF_HOST
376
+
377
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
378
+ long long atomicMin_block(long long *address, long long val) __DEF_IF_HOST
379
+
380
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
381
+ long long atomicMin_system(long long *address, long long val) __DEF_IF_HOST
382
+
383
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
384
+ unsigned int atomicMin_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
385
+
386
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
387
+ unsigned int atomicMin_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
388
+
389
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
390
+ unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
391
+
392
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
393
+ unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
394
+
395
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
396
+ int atomicMax_block(int *address, int val) __DEF_IF_HOST
397
+
398
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
399
+ int atomicMax_system(int *address, int val) __DEF_IF_HOST
400
+
401
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
402
+ long long atomicMax_block(long long *address, long long val) __DEF_IF_HOST
403
+
404
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
405
+ long long atomicMax_system(long long *address, long long val) __DEF_IF_HOST
406
+
407
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
408
+ unsigned int atomicMax_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
409
+
410
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
411
+ unsigned int atomicMax_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
412
+
413
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
414
+ unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
415
+
416
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
417
+ unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
418
+
419
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
420
+ unsigned int atomicInc_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
421
+
422
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
423
+ unsigned int atomicInc_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
424
+
425
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
426
+ unsigned int atomicDec_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
427
+
428
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
429
+ unsigned int atomicDec_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
430
+
431
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
432
+ int atomicCAS_block(int *address, int compare, int val) __DEF_IF_HOST
433
+
434
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
435
+ int atomicCAS_system(int *address, int compare, int val) __DEF_IF_HOST
436
+
437
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
438
+ unsigned int atomicCAS_block(unsigned int *address, unsigned int compare,
439
+ unsigned int val) __DEF_IF_HOST
440
+
441
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
442
+ unsigned int atomicCAS_system(unsigned int *address, unsigned int compare,
443
+ unsigned int val) __DEF_IF_HOST
444
+
445
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
446
+ unsigned long long int atomicCAS_block(unsigned long long int *address,
447
+ unsigned long long int compare,
448
+ unsigned long long int val) __DEF_IF_HOST
449
+
450
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
451
+ unsigned long long int atomicCAS_system(unsigned long long int *address,
452
+ unsigned long long int compare,
453
+ unsigned long long int val) __DEF_IF_HOST
454
+
455
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
456
+ int atomicAnd_block(int *address, int val) __DEF_IF_HOST
457
+
458
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
459
+ int atomicAnd_system(int *address, int val) __DEF_IF_HOST
460
+
461
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
462
+ long long atomicAnd_block(long long *address, long long val) __DEF_IF_HOST
463
+
464
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
465
+ long long atomicAnd_system(long long *address, long long val) __DEF_IF_HOST
466
+
467
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
468
+ unsigned int atomicAnd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
469
+
470
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
471
+ unsigned int atomicAnd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
472
+
473
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
474
+ unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
475
+
476
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
477
+ unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
478
+
479
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
480
+ int atomicOr_block(int *address, int val) __DEF_IF_HOST
481
+
482
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
483
+ int atomicOr_system(int *address, int val) __DEF_IF_HOST
484
+
485
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
486
+ long long atomicOr_block(long long *address, long long val) __DEF_IF_HOST
487
+
488
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
489
+ long long atomicOr_system(long long *address, long long val) __DEF_IF_HOST
490
+
491
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
492
+ unsigned int atomicOr_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
493
+
494
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
495
+ unsigned int atomicOr_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
496
+
497
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
498
+ unsigned long long atomicOr_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
499
+
500
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
501
+ unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
502
+
503
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
504
+ int atomicXor_block(int *address, int val) __DEF_IF_HOST
505
+
506
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
507
+ int atomicXor_system(int *address, int val) __DEF_IF_HOST
508
+
509
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
510
+ long long atomicXor_block(long long *address, long long val) __DEF_IF_HOST
511
+
512
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
513
+ long long atomicXor_system(long long *address, long long val) __DEF_IF_HOST
514
+
515
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
516
+ unsigned int atomicXor_block(unsigned int *address, unsigned int val) __DEF_IF_HOST
517
+
518
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
519
+ unsigned int atomicXor_system(unsigned int *address, unsigned int val) __DEF_IF_HOST
520
+
521
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
522
+ unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
523
+
524
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
525
+ unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST
526
+
527
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */
528
+
529
+ #endif /* __cplusplus && __CUDACC__ */
530
+
531
+ #undef __SM_60_ATOMIC_FUNCTIONS_DECL__
532
+ #undef __DEF_IF_HOST
533
+
534
+ #if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__)
535
+ #include "sm_60_atomic_functions.hpp"
536
+ #endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */
537
+
538
+ #endif /* !__SM_60_ATOMIC_FUNCTIONS_H__ */
539
+
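For orientation (this note is not part of the NVIDIA header): a minimal sketch of how the double overload of atomicAdd declared above might be used. The kernel and variable names are illustrative assumptions, and the code must be compiled for compute capability 6.0 or newer (e.g. nvcc -arch=sm_60), since these overloads only exist from sm_60 onward.

    // Hedged usage sketch: grid-wide sum of n doubles into one managed scalar.
    #include <cuda_runtime.h>
    #include <cstdio>

    __global__ void sumKernel(const double *in, double *total, int n)  // illustrative name
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n)
            atomicAdd(total, in[i]);   // double atomicAdd, declared in this header
    }

    int main()
    {
        const int n = 1 << 20;
        double *in = NULL, *total = NULL;
        cudaMallocManaged(&in, n * sizeof(double));   // managed memory so the host can read the result
        cudaMallocManaged(&total, sizeof(double));
        for (int i = 0; i < n; ++i) in[i] = 1.0;
        *total = 0.0;
        sumKernel<<<(n + 255) / 256, 256>>>(in, total, n);
        cudaDeviceSynchronize();
        printf("sum = %f\n", *total);                 // expect 1048576.0
        cudaFree(in);
        cudaFree(total);
        return 0;
    }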
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp ADDED
@@ -0,0 +1,527 @@
+ /*
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__SM_60_ATOMIC_FUNCTIONS_HPP__)
+ #define __SM_60_ATOMIC_FUNCTIONS_HPP__
+
+ #if defined(__CUDACC_RTC__)
+ #define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__
+ #else /* __CUDACC_RTC__ */
+ #define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__
+ #endif /* __CUDACC_RTC__ */
+
+ #if defined(__cplusplus) && defined(__CUDACC__)
+
+ #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
+
+ /*******************************************************************************
+ * *
+ * *
+ * *
+ *******************************************************************************/
+
+ #include "cuda_runtime_api.h"
+
+ /*******************************************************************************
+ * *
+ * *
+ * *
+ *******************************************************************************/
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val)
+ {
+ return __dAtomicAdd(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicAdd_block(int *address, int val)
+ {
+ return __iAtomicAdd_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicAdd_system(int *address, int val)
+ {
+ return __iAtomicAdd_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicAdd_block(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicAdd_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicAdd_system(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicAdd_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicAdd_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicAdd_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ float atomicAdd_block(float *address, float val)
+ {
+ return __fAtomicAdd_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ float atomicAdd_system(float *address, float val)
+ {
+ return __fAtomicAdd_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ double atomicAdd_block(double *address, double val)
+ {
+ return __dAtomicAdd_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ double atomicAdd_system(double *address, double val)
+ {
+ return __dAtomicAdd_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicSub_block(int *address, int val)
+ {
+ return __iAtomicAdd_block(address, (unsigned int)-(int)val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicSub_system(int *address, int val)
+ {
+ return __iAtomicAdd_system(address, (unsigned int)-(int)val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicSub_block(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicAdd_block(address, (unsigned int)-(int)val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicSub_system(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicAdd_system(address, (unsigned int)-(int)val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicExch_block(int *address, int val)
+ {
+ return __iAtomicExch_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicExch_system(int *address, int val)
+ {
+ return __iAtomicExch_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicExch_block(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicExch_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicExch_system(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicExch_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicExch_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicExch_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ float atomicExch_block(float *address, float val)
+ {
+ return __fAtomicExch_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ float atomicExch_system(float *address, float val)
+ {
+ return __fAtomicExch_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicMin_block(int *address, int val)
+ {
+ return __iAtomicMin_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicMin_system(int *address, int val)
+ {
+ return __iAtomicMin_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ long long atomicMin_block(long long *address, long long val)
+ {
+ return __illAtomicMin_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ long long atomicMin_system(long long *address, long long val)
+ {
+ return __illAtomicMin_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicMin_block(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicMin_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicMin_system(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicMin_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicMin_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicMin_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicMax_block(int *address, int val)
+ {
+ return __iAtomicMax_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicMax_system(int *address, int val)
+ {
+ return __iAtomicMax_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ long long atomicMax_block(long long *address, long long val)
+ {
+ return __illAtomicMax_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ long long atomicMax_system(long long *address, long long val)
+ {
+ return __illAtomicMax_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicMax_block(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicMax_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicMax_system(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicMax_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicMax_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicMax_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicInc_block(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicInc_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicInc_system(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicInc_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicDec_block(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicDec_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicDec_system(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicDec_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicCAS_block(int *address, int compare, int val)
+ {
+ return __iAtomicCAS_block(address, compare, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicCAS_system(int *address, int compare, int val)
+ {
+ return __iAtomicCAS_system(address, compare, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicCAS_block(unsigned int *address, unsigned int compare,
+ unsigned int val)
+ {
+ return __uAtomicCAS_block(address, compare, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicCAS_system(unsigned int *address, unsigned int compare,
+ unsigned int val)
+ {
+ return __uAtomicCAS_system(address, compare, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long int atomicCAS_block(unsigned long long int *address,
+ unsigned long long int compare,
+ unsigned long long int val)
+ {
+ return __ullAtomicCAS_block(address, compare, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long int atomicCAS_system(unsigned long long int *address,
+ unsigned long long int compare,
+ unsigned long long int val)
+ {
+ return __ullAtomicCAS_system(address, compare, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicAnd_block(int *address, int val)
+ {
+ return __iAtomicAnd_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicAnd_system(int *address, int val)
+ {
+ return __iAtomicAnd_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ long long atomicAnd_block(long long *address, long long val)
+ {
+ return __llAtomicAnd_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ long long atomicAnd_system(long long *address, long long val)
+ {
+ return __llAtomicAnd_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicAnd_block(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicAnd_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicAnd_system(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicAnd_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicAnd_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicAnd_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicOr_block(int *address, int val)
+ {
+ return __iAtomicOr_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicOr_system(int *address, int val)
+ {
+ return __iAtomicOr_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ long long atomicOr_block(long long *address, long long val)
+ {
+ return __llAtomicOr_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ long long atomicOr_system(long long *address, long long val)
+ {
+ return __llAtomicOr_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicOr_block(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicOr_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicOr_system(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicOr_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicOr_block(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicOr_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicOr_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicXor_block(int *address, int val)
+ {
+ return __iAtomicXor_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ int atomicXor_system(int *address, int val)
+ {
+ return __iAtomicXor_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ long long atomicXor_block(long long *address, long long val)
+ {
+ return __llAtomicXor_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ long long atomicXor_system(long long *address, long long val)
+ {
+ return __llAtomicXor_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicXor_block(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicXor_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned int atomicXor_system(unsigned int *address, unsigned int val)
+ {
+ return __uAtomicXor_system(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicXor_block(address, val);
+ }
+
+ __SM_60_ATOMIC_FUNCTIONS_DECL__
+ unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val)
+ {
+ return __ullAtomicXor_system(address, val);
+ }
+
+ #endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */
+
+ #endif /* __cplusplus && __CUDACC__ */
+
+ #undef __SM_60_ATOMIC_FUNCTIONS_DECL__
+
+ #endif /* !__SM_60_ATOMIC_FUNCTIONS_HPP__ */
+
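As the definitions above show, each _block/_system wrapper simply forwards to a scope-qualified builtin, and atomicSub is realized as an atomicAdd of the negated value. A short hypothetical kernel illustrating the scope distinction (names are illustrative, not from the header; assumes sm_60+ and that global_hits lives in host-visible managed memory):

    // _block : coherent only within the launching thread block (cheapest).
    // default: coherent across the whole device.
    // _system: coherent with the host and peer GPUs (most expensive).
    __global__ void countHits(const int *data, int n,
                              unsigned int *per_block,    // one counter per block
                              unsigned int *global_hits)  // polled concurrently by the CPU
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n && data[i] > 0) {
            atomicAdd_block(&per_block[blockIdx.x], 1u);  // block-local contention only
            atomicAdd_system(global_hits, 1u);            // visible to the host
        }
    }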
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_functions.h ADDED
@@ -0,0 +1,439 @@
+ /*
+ * Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__SURFACE_FUNCTIONS_H__)
+ #define __SURFACE_FUNCTIONS_H__
+
+ #if defined(__cplusplus) && defined(__CUDACC__)
+
+ /*******************************************************************************
+ * *
+ * *
+ * *
+ *******************************************************************************/
+
+ #include "cuda_runtime_api.h"
+ #include "cuda_surface_types.h"
+
+ #if defined(_WIN32)
+ # define __DEPRECATED__ __declspec(deprecated)
+ #else
+ # define __DEPRECATED__ __attribute__((deprecated))
+ #endif
+
+
+
+ #ifdef __CUDA_ARCH__
+ template <typename T> struct __nv_surf_trait { typedef void * cast_type; };
+
+ template<> struct __nv_surf_trait<char> { typedef char * cast_type; };
+ template<> struct __nv_surf_trait<signed char> { typedef signed char * cast_type; };
+ template<> struct __nv_surf_trait<unsigned char> { typedef unsigned char * cast_type; };
+ template<> struct __nv_surf_trait<char1> { typedef char1 * cast_type; };
+ template<> struct __nv_surf_trait<uchar1> { typedef uchar1 * cast_type; };
+ template<> struct __nv_surf_trait<char2> { typedef char2 * cast_type; };
+ template<> struct __nv_surf_trait<uchar2> { typedef uchar2 * cast_type; };
+ template<> struct __nv_surf_trait<char4> { typedef char4 * cast_type; };
+ template<> struct __nv_surf_trait<uchar4> { typedef uchar4 * cast_type; };
+ template<> struct __nv_surf_trait<short> { typedef short * cast_type; };
+ template<> struct __nv_surf_trait<unsigned short> { typedef unsigned short * cast_type; };
+ template<> struct __nv_surf_trait<short1> { typedef short1 * cast_type; };
+ template<> struct __nv_surf_trait<ushort1> { typedef ushort1 * cast_type; };
+ template<> struct __nv_surf_trait<short2> { typedef short2 * cast_type; };
+ template<> struct __nv_surf_trait<ushort2> { typedef ushort2 * cast_type; };
+ template<> struct __nv_surf_trait<short4> { typedef short4 * cast_type; };
+ template<> struct __nv_surf_trait<ushort4> { typedef ushort4 * cast_type; };
+ template<> struct __nv_surf_trait<int> { typedef int * cast_type; };
+ template<> struct __nv_surf_trait<unsigned int> { typedef unsigned int * cast_type; };
+ template<> struct __nv_surf_trait<int1> { typedef int1 * cast_type; };
+ template<> struct __nv_surf_trait<uint1> { typedef uint1 * cast_type; };
+ template<> struct __nv_surf_trait<int2> { typedef int2 * cast_type; };
+ template<> struct __nv_surf_trait<uint2> { typedef uint2 * cast_type; };
+ template<> struct __nv_surf_trait<int4> { typedef int4 * cast_type; };
+ template<> struct __nv_surf_trait<uint4> { typedef uint4 * cast_type; };
+ template<> struct __nv_surf_trait<long long> { typedef long long * cast_type; };
+ template<> struct __nv_surf_trait<unsigned long long> { typedef unsigned long long * cast_type; };
+ template<> struct __nv_surf_trait<longlong1> { typedef longlong1 * cast_type; };
+ template<> struct __nv_surf_trait<ulonglong1> { typedef ulonglong1 * cast_type; };
+ template<> struct __nv_surf_trait<longlong2> { typedef longlong2 * cast_type; };
+ template<> struct __nv_surf_trait<ulonglong2> { typedef ulonglong2 * cast_type; };
+ #if !defined(__LP64__)
+ template<> struct __nv_surf_trait<long> { typedef int * cast_type; };
+ template<> struct __nv_surf_trait<unsigned long> { typedef unsigned int * cast_type; };
+ template<> struct __nv_surf_trait<long1> { typedef int1 * cast_type; };
+ template<> struct __nv_surf_trait<ulong1> { typedef uint1 * cast_type; };
+ template<> struct __nv_surf_trait<long2> { typedef int2 * cast_type; };
+ template<> struct __nv_surf_trait<ulong2> { typedef uint2 * cast_type; };
+ template<> struct __nv_surf_trait<long4> { typedef uint4 * cast_type; };
+ template<> struct __nv_surf_trait<ulong4> { typedef int4 * cast_type; };
+ #endif
+ template<> struct __nv_surf_trait<float> { typedef float * cast_type; };
+ template<> struct __nv_surf_trait<float1> { typedef float1 * cast_type; };
+ template<> struct __nv_surf_trait<float2> { typedef float2 * cast_type; };
+ template<> struct __nv_surf_trait<float4> { typedef float4 * cast_type; };
+ #endif /* defined(__CUDA_ARCH__) */
+
+ template <typename T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf1Dread(T *res, surface<void, cudaSurfaceType1D> surf, int x, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf1Dread_v2", (void *)res, s, surf, x, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ T surf1Dread(surface<void, cudaSurfaceType1D> surf, int x, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ T temp;
+ __nv_tex_surf_handler("__surf1Dread_v2", (typename __nv_surf_trait<T>::cast_type)&temp, (int)sizeof(T), surf, x, mode);
+ return temp;
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf1Dread(T *res, surface<void, cudaSurfaceType1D> surf, int x, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ *res = surf1Dread<T>(surf, x, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+
+ template <typename T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf2Dread(T *res, surface<void, cudaSurfaceType2D> surf, int x, int y, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf2Dread_v2", (void *)res, s, surf, x, y, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ T surf2Dread(surface<void, cudaSurfaceType2D> surf, int x, int y, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ T temp;
+ __nv_tex_surf_handler("__surf2Dread_v2", (typename __nv_surf_trait<T>::cast_type)&temp, (int)sizeof(T), surf, x, y, mode);
+ return temp;
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf2Dread(T *res, surface<void, cudaSurfaceType2D> surf, int x, int y, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ *res = surf2Dread<T>(surf, x, y, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+
+ template <typename T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf3Dread(T *res, surface<void, cudaSurfaceType3D> surf, int x, int y, int z, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf3Dread_v2", (void *)res, s, surf, x, y, z, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ T surf3Dread(surface<void, cudaSurfaceType3D> surf, int x, int y, int z, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ T temp;
+ __nv_tex_surf_handler("__surf3Dread_v2", (typename __nv_surf_trait<T>::cast_type)&temp, (int)sizeof(T), surf, x, y, z, mode);
+ return temp;
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf3Dread(T *res, surface<void, cudaSurfaceType3D> surf, int x, int y, int z, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ *res = surf3Dread<T>(surf, x, y, z, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+
+
+ template <typename T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf1DLayeredread(T *res, surface<void, cudaSurfaceType1DLayered> surf, int x, int layer, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf1DLayeredread_v2", (void *)res, s, surf, x, layer, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ T surf1DLayeredread(surface<void, cudaSurfaceType1DLayered> surf, int x, int layer, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ T temp;
+ __nv_tex_surf_handler("__surf1DLayeredread_v2", (typename __nv_surf_trait<T>::cast_type)&temp, (int)sizeof(T), surf, x, layer, mode);
+ return temp;
+ #endif
+ }
+
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf1DLayeredread(T *res, surface<void, cudaSurfaceType1DLayered> surf, int x, int layer, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ *res = surf1DLayeredread<T>(surf, x, layer, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+
+ template <typename T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf2DLayeredread(T *res, surface<void, cudaSurfaceType2DLayered> surf, int x, int y, int layer, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf2DLayeredread_v2", (void *)res, s, surf, x, y, layer, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ T surf2DLayeredread(surface<void, cudaSurfaceType2DLayered> surf, int x, int y, int layer, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ T temp;
+ __nv_tex_surf_handler("__surf2DLayeredread_v2", (typename __nv_surf_trait<T>::cast_type)&temp, (int)sizeof(T), surf, x, y, layer, mode);
+ return temp;
+ #endif
+ }
+
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf2DLayeredread(T *res, surface<void, cudaSurfaceType2DLayered> surf, int x, int y, int layer, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ *res = surf2DLayeredread<T>(surf, x, y, layer, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+
+ template <typename T>
+ static __device__ __forceinline__ void surfCubemapread(T *res, surface<void, cudaSurfaceTypeCubemap> surf, int x, int y, int face, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surfCubemapread_v2", (void *)res, s, surf, x, y, face, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ T surfCubemapread(surface<void, cudaSurfaceTypeCubemap> surf, int x, int y, int face, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ T temp;
+
+ __nv_tex_surf_handler("__surfCubemapread_v2", (typename __nv_surf_trait<T>::cast_type)&temp, (int)sizeof(T), surf, x, y, face, mode);
+ return temp;
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surfCubemapread(T *res, surface<void, cudaSurfaceTypeCubemap> surf, int x, int y, int face, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ *res = surfCubemapread<T>(surf, x, y, face, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+
+ template <typename T>
+ static __DEPRECATED__ __device__ __forceinline__ void surfCubemapLayeredread(T *res, surface<void, cudaSurfaceTypeCubemapLayered> surf, int x, int y, int layerFace, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surfCubemapLayeredread_v2", (void *)res, s, surf, x, y, layerFace, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ T surfCubemapLayeredread(surface<void, cudaSurfaceTypeCubemapLayered> surf, int x, int y, int layerFace, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ T temp;
+ __nv_tex_surf_handler("__surfCubemapLayeredread_v2", (typename __nv_surf_trait<T>::cast_type)&temp, (int)sizeof(T), surf, x, y, layerFace, mode);
+ return temp;
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surfCubemapLayeredread(T *res, surface<void, cudaSurfaceTypeCubemapLayered> surf, int x, int y, int layerFace, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ *res = surfCubemapLayeredread<T>(surf, x, y, layerFace, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ //surf1Dwrite
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf1Dwrite(T val, surface<void, cudaSurfaceType1D> surf, int x, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf1Dwrite_v2", (void *)&val, s, surf, x, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf1Dwrite(T val, surface<void, cudaSurfaceType1D> surf, int x, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf1Dwrite_v2", (typename __nv_surf_trait<T>::cast_type)&val, (int)sizeof(T), surf, x, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+
+ //surf2Dwrite
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf2Dwrite(T val, surface<void, cudaSurfaceType2D> surf, int x, int y, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf2Dwrite_v2", (void *)&val, s, surf, x, y, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf2Dwrite(T val, surface<void, cudaSurfaceType2D> surf, int x, int y, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf2Dwrite_v2", (typename __nv_surf_trait<T>::cast_type)&val, (int)sizeof(T), surf, x, y, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ //surf3Dwrite
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf3Dwrite(T val, surface<void, cudaSurfaceType3D> surf, int x, int y, int z, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf3Dwrite_v2", (void *)&val, s, surf, x, y, z, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf3Dwrite(T val, surface<void, cudaSurfaceType3D> surf, int x, int y, int z, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf3Dwrite_v2", (typename __nv_surf_trait<T>::cast_type)&val, (int)sizeof(T), surf, x, y, z, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ //surf1DLayeredwrite
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf1DLayeredwrite(T val, surface<void, cudaSurfaceType1DLayered> surf, int x, int layer, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf1DLayeredwrite_v2", (void *)&val, s, surf, x, layer, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf1DLayeredwrite(T val, surface<void, cudaSurfaceType1DLayered> surf, int x, int layer, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf1DLayeredwrite_v2", (typename __nv_surf_trait<T>::cast_type)&val, (int)sizeof(T), surf, x, layer, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ //surf2DLayeredwrite
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf2DLayeredwrite(T val, surface<void, cudaSurfaceType2DLayered> surf, int x, int y, int layer, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf2DLayeredwrite_v2", (void *)&val, s, surf, x, y, layer, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surf2DLayeredwrite(T val, surface<void, cudaSurfaceType2DLayered> surf, int x, int y, int layer, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surf2DLayeredwrite_v2", (typename __nv_surf_trait<T>::cast_type)&val, (int)sizeof(T), surf, x, y, layer, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ //surfCubemapwrite
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surfCubemapwrite(T val, surface<void, cudaSurfaceTypeCubemap> surf, int x, int y, int face, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surfCubemapwrite_v2", (void *)&val, s, surf, x, y, face, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surfCubemapwrite(T val, surface<void, cudaSurfaceTypeCubemap> surf, int x, int y, int face, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surfCubemapwrite_v2", (typename __nv_surf_trait<T>::cast_type)&val, (int)sizeof(T), surf, x, y, face, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+
+ //surfCubemapLayeredwrite
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surfCubemapLayeredwrite(T val, surface<void, cudaSurfaceTypeCubemapLayered> surf, int x, int y, int layerFace, int s, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surfCubemapLayeredwrite_v2", (void *)&val, s, surf, x, y, layerFace, mode);
+ #endif
+ }
+
+ template<class T>
+ static __DEPRECATED__ __device__ __forceinline__ void surfCubemapLayeredwrite(T val, surface<void, cudaSurfaceTypeCubemapLayered> surf, int x, int y, int layerFace, enum cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__surfCubemapLayeredwrite_v2", (typename __nv_surf_trait<T>::cast_type)&val, (int)sizeof(T), surf, x, y, layerFace, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ #undef __DEPRECATED__
+
+
+ #endif /* __cplusplus && __CUDACC__ */
+ #endif /* !__SURFACE_FUNCTIONS_H__ */
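Every function in this header is built on surface references (the surface<void, ...> template), which is why each is tagged __DEPRECATED__; the surface-object API in surface_indirect_functions.h below is the replacement. A hedged sketch of the legacy pattern, with illustrative names; note that for surface reads and writes the x coordinate is given in bytes, not elements:

    // Illustrative only: a file-scope surface reference, bound on the host
    // with cudaBindSurfaceToArray() before the kernel launch.
    surface<void, cudaSurfaceType2D> imgSurf;

    __global__ void invert(int width, int height)
    {
        int x = blockIdx.x * blockDim.x + threadIdx.x;
        int y = blockIdx.y * blockDim.y + threadIdx.y;
        if (x < width && y < height) {
            // x is byte-addressed, hence the sizeof(uchar4) scaling
            uchar4 p = surf2Dread<uchar4>(imgSurf, x * (int)sizeof(uchar4), y);
            p.x = 255 - p.x; p.y = 255 - p.y; p.z = 255 - p.z;
            surf2Dwrite(p, imgSurf, x * (int)sizeof(uchar4), y);
        }
    }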
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/surface_indirect_functions.h ADDED
@@ -0,0 +1,286 @@
+ /*
+ * Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+
+ #ifndef __SURFACE_INDIRECT_FUNCTIONS_H__
+ #define __SURFACE_INDIRECT_FUNCTIONS_H__
+
+
+ #if defined(__cplusplus) && defined(__CUDACC__)
+
+ #include "cuda_runtime_api.h"
+
+ template<typename T> struct __nv_isurf_trait { };
+ template<> struct __nv_isurf_trait<char> { typedef void type; };
+ template<> struct __nv_isurf_trait<signed char> { typedef void type; };
+ template<> struct __nv_isurf_trait<char1> { typedef void type; };
+ template<> struct __nv_isurf_trait<unsigned char> { typedef void type; };
+ template<> struct __nv_isurf_trait<uchar1> { typedef void type; };
+ template<> struct __nv_isurf_trait<short> { typedef void type; };
+ template<> struct __nv_isurf_trait<short1> { typedef void type; };
+ template<> struct __nv_isurf_trait<unsigned short> { typedef void type; };
+ template<> struct __nv_isurf_trait<ushort1> { typedef void type; };
+ template<> struct __nv_isurf_trait<int> { typedef void type; };
+ template<> struct __nv_isurf_trait<int1> { typedef void type; };
+ template<> struct __nv_isurf_trait<unsigned int> { typedef void type; };
+ template<> struct __nv_isurf_trait<uint1> { typedef void type; };
+ template<> struct __nv_isurf_trait<long long> { typedef void type; };
+ template<> struct __nv_isurf_trait<longlong1> { typedef void type; };
+ template<> struct __nv_isurf_trait<unsigned long long> { typedef void type; };
+ template<> struct __nv_isurf_trait<ulonglong1> { typedef void type; };
+ template<> struct __nv_isurf_trait<float> { typedef void type; };
+ template<> struct __nv_isurf_trait<float1> { typedef void type; };
+
+ template<> struct __nv_isurf_trait<char2> { typedef void type; };
+ template<> struct __nv_isurf_trait<uchar2> { typedef void type; };
+ template<> struct __nv_isurf_trait<short2> { typedef void type; };
+ template<> struct __nv_isurf_trait<ushort2> { typedef void type; };
+ template<> struct __nv_isurf_trait<int2> { typedef void type; };
+ template<> struct __nv_isurf_trait<uint2> { typedef void type; };
+ template<> struct __nv_isurf_trait<longlong2> { typedef void type; };
+ template<> struct __nv_isurf_trait<ulonglong2> { typedef void type; };
+ template<> struct __nv_isurf_trait<float2> { typedef void type; };
+
+ template<> struct __nv_isurf_trait<char4> { typedef void type; };
+ template<> struct __nv_isurf_trait<uchar4> { typedef void type; };
+ template<> struct __nv_isurf_trait<short4> { typedef void type; };
+ template<> struct __nv_isurf_trait<ushort4> { typedef void type; };
+ template<> struct __nv_isurf_trait<int4> { typedef void type; };
+ template<> struct __nv_isurf_trait<uint4> { typedef void type; };
+ template<> struct __nv_isurf_trait<float4> { typedef void type; };
+
+
+ template <typename T>
+ static __device__ typename __nv_isurf_trait<T>::type surf1Dread(T *ptr, cudaSurfaceObject_t obj, int x, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__isurf1Dread", ptr, obj, x, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ template <class T>
+ static __device__ T surf1Dread(cudaSurfaceObject_t surfObject, int x, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ T ret;
+ surf1Dread(&ret, surfObject, x, boundaryMode);
+ return ret;
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ template <typename T>
+ static __device__ typename __nv_isurf_trait<T>::type surf2Dread(T *ptr, cudaSurfaceObject_t obj, int x, int y, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__isurf2Dread", ptr, obj, x, y, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ template <class T>
+ static __device__ T surf2Dread(cudaSurfaceObject_t surfObject, int x, int y, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ T ret;
+ surf2Dread(&ret, surfObject, x, y, boundaryMode);
+ return ret;
+ #endif /* __CUDA_ARCH__ */
+ }
+
+
+ template <typename T>
+ static __device__ typename __nv_isurf_trait<T>::type surf3Dread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int z, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__isurf3Dread", ptr, obj, x, y, z, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ template <class T>
+ static __device__ T surf3Dread(cudaSurfaceObject_t surfObject, int x, int y, int z, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ T ret;
+ surf3Dread(&ret, surfObject, x, y, z, boundaryMode);
+ return ret;
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ template <typename T>
+ static __device__ typename __nv_isurf_trait<T>::type surf1DLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ __nv_tex_surf_handler("__isurf1DLayeredread", ptr, obj, x, layer, mode);
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ template <class T>
+ static __device__ T surf1DLayeredread(cudaSurfaceObject_t surfObject, int x, int layer, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
+ {
+ #ifdef __CUDA_ARCH__
+ T ret;
+ surf1DLayeredread(&ret, surfObject, x, layer, boundaryMode);
+ return ret;
+ #endif /* __CUDA_ARCH__ */
+ }
+
+ template <typename T>
+ static __device__ typename __nv_isurf_trait<T>::type surf2DLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
174
+ {
175
+ #ifdef __CUDA_ARCH__
176
+ __nv_tex_surf_handler("__isurf2DLayeredread", ptr, obj, x, y, layer, mode);
177
+ #endif /* __CUDA_ARCH__ */
178
+ }
179
+
180
+ template <class T>
181
+ static __device__ T surf2DLayeredread(cudaSurfaceObject_t surfObject, int x, int y, int layer, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
182
+ {
183
+ #ifdef __CUDA_ARCH__
184
+ T ret;
185
+ surf2DLayeredread(&ret, surfObject, x, y, layer, boundaryMode);
186
+ return ret;
187
+ #endif /* __CUDA_ARCH__ */
188
+ }
189
+
190
+ template <typename T>
191
+ static __device__ typename __nv_isurf_trait<T>::type surfCubemapread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int face, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
192
+ {
193
+ #ifdef __CUDA_ARCH__
194
+ __nv_tex_surf_handler("__isurfCubemapread", ptr, obj, x, y, face, mode);
195
+ #endif /* __CUDA_ARCH__ */
196
+ }
197
+
198
+ template <class T>
199
+ static __device__ T surfCubemapread(cudaSurfaceObject_t surfObject, int x, int y, int face, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
200
+ {
201
+ #ifdef __CUDA_ARCH__
202
+ T ret;
203
+ surfCubemapread(&ret, surfObject, x, y, face, boundaryMode);
204
+ return ret;
205
+ #endif /* __CUDA_ARCH__ */
206
+ }
207
+
208
+ template <typename T>
209
+ static __device__ typename __nv_isurf_trait<T>::type surfCubemapLayeredread(T *ptr, cudaSurfaceObject_t obj, int x, int y, int layerface, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
210
+ {
211
+ #ifdef __CUDA_ARCH__
212
+ __nv_tex_surf_handler("__isurfCubemapLayeredread", ptr, obj, x, y, layerface, mode);
213
+ #endif /* __CUDA_ARCH__ */
214
+ }
215
+
216
+ template <class T>
217
+ static __device__ T surfCubemapLayeredread(cudaSurfaceObject_t surfObject, int x, int y, int layerface, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
218
+ {
219
+ #ifdef __CUDA_ARCH__
220
+ T ret;
221
+ surfCubemapLayeredread(&ret, surfObject, x, y, layerface, boundaryMode);
222
+ return ret;
223
+ #endif /* __CUDA_ARCH__ */
224
+ }
225
+
226
+ template <typename T>
227
+ static __device__ typename __nv_isurf_trait<T>::type surf1Dwrite(T val, cudaSurfaceObject_t obj, int x, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
228
+ {
229
+ #ifdef __CUDA_ARCH__
230
+ __nv_tex_surf_handler("__isurf1Dwrite_v2", &val, obj, x, mode);
231
+ #endif /* __CUDA_ARCH__ */
232
+ }
233
+
234
+ template <typename T>
235
+ static __device__ typename __nv_isurf_trait<T>::type surf2Dwrite(T val, cudaSurfaceObject_t obj, int x, int y, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
236
+ {
237
+ #ifdef __CUDA_ARCH__
238
+ __nv_tex_surf_handler("__isurf2Dwrite_v2", &val, obj, x, y, mode);
239
+ #endif /* __CUDA_ARCH__ */
240
+ }
241
+
242
+ template <typename T>
243
+ static __device__ typename __nv_isurf_trait<T>::type surf3Dwrite(T val, cudaSurfaceObject_t obj, int x, int y, int z, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
244
+ {
245
+ #ifdef __CUDA_ARCH__
246
+ __nv_tex_surf_handler("__isurf3Dwrite_v2", &val, obj, x, y, z, mode);
247
+ #endif /* __CUDA_ARCH__ */
248
+ }
249
+
250
+ template <typename T>
251
+ static __device__ typename __nv_isurf_trait<T>::type surf1DLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
252
+ {
253
+ #ifdef __CUDA_ARCH__
254
+ __nv_tex_surf_handler("__isurf1DLayeredwrite_v2", &val, obj, x, layer, mode);
255
+ #endif /* __CUDA_ARCH__ */
256
+ }
257
+
258
+ template <typename T>
259
+ static __device__ typename __nv_isurf_trait<T>::type surf2DLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int y, int layer, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
260
+ {
261
+ #ifdef __CUDA_ARCH__
262
+ __nv_tex_surf_handler("__isurf2DLayeredwrite_v2", &val, obj, x, y, layer, mode);
263
+ #endif /* __CUDA_ARCH__ */
264
+ }
265
+
266
+ template <typename T>
267
+ static __device__ typename __nv_isurf_trait<T>::type surfCubemapwrite(T val, cudaSurfaceObject_t obj, int x, int y, int face, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
268
+ {
269
+ #ifdef __CUDA_ARCH__
270
+ __nv_tex_surf_handler("__isurfCubemapwrite_v2", &val, obj, x, y, face, mode);
271
+ #endif /* __CUDA_ARCH__ */
272
+ }
273
+
274
+ template <typename T>
275
+ static __device__ typename __nv_isurf_trait<T>::type surfCubemapLayeredwrite(T val, cudaSurfaceObject_t obj, int x, int y, int layerface, cudaSurfaceBoundaryMode mode = cudaBoundaryModeTrap)
276
+ {
277
+ #ifdef __CUDA_ARCH__
278
+ __nv_tex_surf_handler("__isurfCubemapLayeredwrite_v2", &val, obj, x, y, layerface, mode);
279
+ #endif /* __CUDA_ARCH__ */
280
+ }
281
+
282
+ #endif // __cplusplus && __CUDACC__
283
+
284
+ #endif // __SURFACE_INDIRECT_FUNCTIONS_H__
285
+
286
+
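The `__nv_isurf_trait` specializations in this header act as a whitelist: the pointer-taking overloads only instantiate for the 1-, 2-, and 4-component element types that CUDA surfaces support, and the value-returning overloads forward to them. A minimal usage sketch, not part of this commit (`scale_surface` is an illustrative kernel name; the surface object is assumed to wrap a 2D cudaArray of float texels):

__global__ void scale_surface(cudaSurfaceObject_t surf, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;

    // The x coordinate of a surface access is expressed in bytes, hence the
    // sizeof scaling. cudaBoundaryModeTrap (the default in every overload
    // above) faults on out-of-range accesses; Zero and Clamp are the
    // non-faulting alternatives.
    float v = surf2Dread<float>(surf, x * (int)sizeof(float), y);
    surf2Dwrite(2.0f * v, surf, x * (int)sizeof(float), y);
}

Both overload families bottom out in the same `__nv_tex_surf_handler` intrinsic; the value-returning form just spares the caller a temporary.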
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h ADDED
@@ -0,0 +1,229 @@
+ /*
+ * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__TEXTURE_TYPES_H__)
+ #define __TEXTURE_TYPES_H__
+
+ /*******************************************************************************
+ *                                                                             *
+ *                                                                             *
+ *                                                                             *
+ *******************************************************************************/
+
+ #include "driver_types.h"
+
+ /**
+ * \addtogroup CUDART_TYPES
+ *
+ * @{
+ */
+
+ /*******************************************************************************
+ *                                                                             *
+ *                                                                             *
+ *                                                                             *
+ *******************************************************************************/
+
+ #define cudaTextureType1D 0x01
+ #define cudaTextureType2D 0x02
+ #define cudaTextureType3D 0x03
+ #define cudaTextureTypeCubemap 0x0C
+ #define cudaTextureType1DLayered 0xF1
+ #define cudaTextureType2DLayered 0xF2
+ #define cudaTextureTypeCubemapLayered 0xFC
+
+ /**
+ * CUDA texture address modes
+ */
+ enum __device_builtin__ cudaTextureAddressMode
+ {
+     cudaAddressModeWrap = 0,   /**< Wrapping address mode */
+     cudaAddressModeClamp = 1,  /**< Clamp to edge address mode */
+     cudaAddressModeMirror = 2, /**< Mirror address mode */
+     cudaAddressModeBorder = 3  /**< Border address mode */
+ };
+
+ /**
+ * CUDA texture filter modes
+ */
+ enum __device_builtin__ cudaTextureFilterMode
+ {
+     cudaFilterModePoint = 0,  /**< Point filter mode */
+     cudaFilterModeLinear = 1  /**< Linear filter mode */
+ };
+
+ /**
+ * CUDA texture read modes
+ */
+ enum __device_builtin__ cudaTextureReadMode
+ {
+     cudaReadModeElementType = 0,     /**< Read texture as specified element type */
+     cudaReadModeNormalizedFloat = 1  /**< Read texture as normalized float */
+ };
+
+ /**
+ * CUDA texture reference
+ */
+ struct __device_builtin__ textureReference
+ {
+     /**
+      * Indicates whether texture reads are normalized or not
+      */
+     int normalized;
+     /**
+      * Texture filter mode
+      */
+     enum cudaTextureFilterMode filterMode;
+     /**
+      * Texture address mode for up to 3 dimensions
+      */
+     enum cudaTextureAddressMode addressMode[3];
+     /**
+      * Channel descriptor for the texture reference
+      */
+     struct cudaChannelFormatDesc channelDesc;
+     /**
+      * Perform sRGB->linear conversion during texture read
+      */
+     int sRGB;
+     /**
+      * Limit to the anisotropy ratio
+      */
+     unsigned int maxAnisotropy;
+     /**
+      * Mipmap filter mode
+      */
+     enum cudaTextureFilterMode mipmapFilterMode;
+     /**
+      * Offset applied to the supplied mipmap level
+      */
+     float mipmapLevelBias;
+     /**
+      * Lower end of the mipmap level range to clamp access to
+      */
+     float minMipmapLevelClamp;
+     /**
+      * Upper end of the mipmap level range to clamp access to
+      */
+     float maxMipmapLevelClamp;
+     /**
+      * Disable any trilinear filtering optimizations.
+      */
+     int disableTrilinearOptimization;
+     int __cudaReserved[14];
+ };
+
+ /**
+ * CUDA texture descriptor
+ */
+ struct __device_builtin__ cudaTextureDesc
+ {
+     /**
+      * Texture address mode for up to 3 dimensions
+      */
+     enum cudaTextureAddressMode addressMode[3];
+     /**
+      * Texture filter mode
+      */
+     enum cudaTextureFilterMode filterMode;
+     /**
+      * Texture read mode
+      */
+     enum cudaTextureReadMode readMode;
+     /**
+      * Perform sRGB->linear conversion during texture read
+      */
+     int sRGB;
+     /**
+      * Texture Border Color
+      */
+     float borderColor[4];
+     /**
+      * Indicates whether texture reads are normalized or not
+      */
+     int normalizedCoords;
+     /**
+      * Limit to the anisotropy ratio
+      */
+     unsigned int maxAnisotropy;
+     /**
+      * Mipmap filter mode
+      */
+     enum cudaTextureFilterMode mipmapFilterMode;
+     /**
+      * Offset applied to the supplied mipmap level
+      */
+     float mipmapLevelBias;
+     /**
+      * Lower end of the mipmap level range to clamp access to
+      */
+     float minMipmapLevelClamp;
+     /**
+      * Upper end of the mipmap level range to clamp access to
+      */
+     float maxMipmapLevelClamp;
+     /**
+      * Disable any trilinear filtering optimizations.
+      */
+     int disableTrilinearOptimization;
+     /**
+      * Enable seamless cube map filtering.
+      */
+     int seamlessCubemap;
+ };
+
+ /**
+ * An opaque value that represents a CUDA texture object
+ */
+ typedef __device_builtin__ unsigned long long cudaTextureObject_t;
+
+ /** @} */
+ /** @} */ /* END CUDART_TYPES */
+
+ #endif /* !__TEXTURE_TYPES_H__ */
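The `cudaTextureDesc` declared here is what host code passes to the runtime's `cudaCreateTextureObject`, paired with a `cudaResourceDesc` that names the backing memory. A minimal host-side sketch, not part of this commit (`make_linear_clamped_texture` is an illustrative helper; `array` is assumed to be an already-populated 2D float `cudaArray_t`):

#include <cuda_runtime.h>
#include <cstring>

cudaTextureObject_t make_linear_clamped_texture(cudaArray_t array)
{
    cudaResourceDesc resDesc;
    std::memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeArray;   // texture backed by a CUDA array
    resDesc.res.array.array = array;

    cudaTextureDesc texDesc;
    std::memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0]   = cudaAddressModeClamp;  // clamp out-of-range x
    texDesc.addressMode[1]   = cudaAddressModeClamp;  // clamp out-of-range y
    texDesc.filterMode       = cudaFilterModeLinear;  // bilinear interpolation
    texDesc.readMode         = cudaReadModeElementType;
    texDesc.normalizedCoords = 1;                     // sample with coords in [0, 1)

    cudaTextureObject_t tex = 0;
    // Real code should check the returned cudaError_t.
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    return tex;
}

Zero-initializing both descriptors before setting fields matters: newer CUDA releases append fields (such as `seamlessCubemap` above), and stale stack garbage in them is a classic source of invalid-argument errors.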
openflamingo/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h ADDED
@@ -0,0 +1,175 @@
+ /*
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+ #if !defined(__VECTOR_FUNCTIONS_H__)
+ #define __VECTOR_FUNCTIONS_H__
+
+ /*******************************************************************************
+ *                                                                             *
+ *                                                                             *
+ *                                                                             *
+ *******************************************************************************/
+
+ #include "cuda_runtime_api.h"
+
+ #if defined(__CUDACC_RTC__)
+ #define __VECTOR_FUNCTIONS_DECL__ __host__ __device__
+ #else /* !__CUDACC_RTC__ */
+ #define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__
+ #endif /* __CUDACC_RTC__ */
+
+ /*******************************************************************************
+ *                                                                             *
+ *                                                                             *
+ *                                                                             *
+ *******************************************************************************/
+
+ __VECTOR_FUNCTIONS_DECL__ char1 make_char1(signed char x);
+
+ __VECTOR_FUNCTIONS_DECL__ uchar1 make_uchar1(unsigned char x);
+
+ __VECTOR_FUNCTIONS_DECL__ char2 make_char2(signed char x, signed char y);
+
+ __VECTOR_FUNCTIONS_DECL__ uchar2 make_uchar2(unsigned char x, unsigned char y);
+
+ __VECTOR_FUNCTIONS_DECL__ char3 make_char3(signed char x, signed char y, signed char z);
+
+ __VECTOR_FUNCTIONS_DECL__ uchar3 make_uchar3(unsigned char x, unsigned char y, unsigned char z);
+
+ __VECTOR_FUNCTIONS_DECL__ char4 make_char4(signed char x, signed char y, signed char z, signed char w);
+
+ __VECTOR_FUNCTIONS_DECL__ uchar4 make_uchar4(unsigned char x, unsigned char y, unsigned char z, unsigned char w);
+
+ __VECTOR_FUNCTIONS_DECL__ short1 make_short1(short x);
+
+ __VECTOR_FUNCTIONS_DECL__ ushort1 make_ushort1(unsigned short x);
+
+ __VECTOR_FUNCTIONS_DECL__ short2 make_short2(short x, short y);
+
+ __VECTOR_FUNCTIONS_DECL__ ushort2 make_ushort2(unsigned short x, unsigned short y);
+
+ __VECTOR_FUNCTIONS_DECL__ short3 make_short3(short x, short y, short z);
+
+ __VECTOR_FUNCTIONS_DECL__ ushort3 make_ushort3(unsigned short x, unsigned short y, unsigned short z);
+
+ __VECTOR_FUNCTIONS_DECL__ short4 make_short4(short x, short y, short z, short w);
+
+ __VECTOR_FUNCTIONS_DECL__ ushort4 make_ushort4(unsigned short x, unsigned short y, unsigned short z, unsigned short w);
+
+ __VECTOR_FUNCTIONS_DECL__ int1 make_int1(int x);
+
+ __VECTOR_FUNCTIONS_DECL__ uint1 make_uint1(unsigned int x);
+
+ __VECTOR_FUNCTIONS_DECL__ int2 make_int2(int x, int y);
+
+ __VECTOR_FUNCTIONS_DECL__ uint2 make_uint2(unsigned int x, unsigned int y);
+
+ __VECTOR_FUNCTIONS_DECL__ int3 make_int3(int x, int y, int z);
+
+ __VECTOR_FUNCTIONS_DECL__ uint3 make_uint3(unsigned int x, unsigned int y, unsigned int z);
+
+ __VECTOR_FUNCTIONS_DECL__ int4 make_int4(int x, int y, int z, int w);
+
+ __VECTOR_FUNCTIONS_DECL__ uint4 make_uint4(unsigned int x, unsigned int y, unsigned int z, unsigned int w);
+
+ __VECTOR_FUNCTIONS_DECL__ long1 make_long1(long int x);
+
+ __VECTOR_FUNCTIONS_DECL__ ulong1 make_ulong1(unsigned long int x);
+
+ __VECTOR_FUNCTIONS_DECL__ long2 make_long2(long int x, long int y);
+
+ __VECTOR_FUNCTIONS_DECL__ ulong2 make_ulong2(unsigned long int x, unsigned long int y);
+
+ __VECTOR_FUNCTIONS_DECL__ long3 make_long3(long int x, long int y, long int z);
+
+ __VECTOR_FUNCTIONS_DECL__ ulong3 make_ulong3(unsigned long int x, unsigned long int y, unsigned long int z);
+
+ __VECTOR_FUNCTIONS_DECL__ long4 make_long4(long int x, long int y, long int z, long int w);
+
+ __VECTOR_FUNCTIONS_DECL__ ulong4 make_ulong4(unsigned long int x, unsigned long int y, unsigned long int z, unsigned long int w);
+
+ __VECTOR_FUNCTIONS_DECL__ float1 make_float1(float x);
+
+ __VECTOR_FUNCTIONS_DECL__ float2 make_float2(float x, float y);
+
+ __VECTOR_FUNCTIONS_DECL__ float3 make_float3(float x, float y, float z);
+
+ __VECTOR_FUNCTIONS_DECL__ float4 make_float4(float x, float y, float z, float w);
+
+ __VECTOR_FUNCTIONS_DECL__ longlong1 make_longlong1(long long int x);
+
+ __VECTOR_FUNCTIONS_DECL__ ulonglong1 make_ulonglong1(unsigned long long int x);
+
+ __VECTOR_FUNCTIONS_DECL__ longlong2 make_longlong2(long long int x, long long int y);
+
+ __VECTOR_FUNCTIONS_DECL__ ulonglong2 make_ulonglong2(unsigned long long int x, unsigned long long int y);
+
+ __VECTOR_FUNCTIONS_DECL__ longlong3 make_longlong3(long long int x, long long int y, long long int z);
+
+ __VECTOR_FUNCTIONS_DECL__ ulonglong3 make_ulonglong3(unsigned long long int x, unsigned long long int y, unsigned long long int z);
+
+ __VECTOR_FUNCTIONS_DECL__ longlong4 make_longlong4(long long int x, long long int y, long long int z, long long int w);
+
+ __VECTOR_FUNCTIONS_DECL__ ulonglong4 make_ulonglong4(unsigned long long int x, unsigned long long int y, unsigned long long int z, unsigned long long int w);
+
+ __VECTOR_FUNCTIONS_DECL__ double1 make_double1(double x);
+
+ __VECTOR_FUNCTIONS_DECL__ double2 make_double2(double x, double y);
+
+ __VECTOR_FUNCTIONS_DECL__ double3 make_double3(double x, double y, double z);
+
+ __VECTOR_FUNCTIONS_DECL__ double4 make_double4(double x, double y, double z, double w);
+
+ #undef __VECTOR_FUNCTIONS_DECL__
+
+ #if !defined(__CUDACC_RTC__)
+ #include "vector_functions.hpp"
+ #endif /* !__CUDACC_RTC__ */
+
+ #endif /* !__VECTOR_FUNCTIONS_H__ */
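These `make_*` declarations are plain component-wise constructors for the built-in vector types; their inline definitions live in `vector_functions.hpp`, included at the bottom of this header. A short sketch, not part of this commit (`lerp4` is an illustrative helper):

__host__ __device__ float4 lerp4(float4 a, float4 b, float t)
{
    // make_float4 simply assembles a float4 from four scalars; this header
    // provides no arithmetic operators on the vector types, so component-wise
    // math is written out explicitly.
    return make_float4(a.x + t * (b.x - a.x),
                       a.y + t * (b.y - a.y),
                       a.z + t * (b.z - a.z),
                       a.w + t * (b.w - a.w));
}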
openflamingo/lib/python3.10/site-packages/tokenizers.libs/libk5crypto-b1f99d5c.so.3.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9844e5009e70a6ad2fb22b587306810fe2a7b1b9f6b9922daa1c78ba3466de27
+ size 219953
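(For orientation: the three lines above are a Git LFS pointer file, spec v1, not the library itself; the ~220 KB shared object is stored in LFS and fetched by its sha256 oid at checkout rather than being committed inline.)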
phi4/lib/python3.10/site-packages/PIL/__pycache__/FontFile.cpython-310.pyc ADDED
Binary file (2.89 kB)
phi4/lib/python3.10/site-packages/PIL/__pycache__/GimpGradientFile.cpython-310.pyc ADDED
Binary file (3.69 kB)
phi4/lib/python3.10/site-packages/PIL/__pycache__/IcnsImagePlugin.cpython-310.pyc ADDED
Binary file (10.3 kB)
phi4/lib/python3.10/site-packages/PIL/__pycache__/ImageMath.cpython-310.pyc ADDED
Binary file (11.2 kB)
phi4/lib/python3.10/site-packages/PIL/__pycache__/McIdasImagePlugin.cpython-310.pyc ADDED
Binary file (1.52 kB)
phi4/lib/python3.10/site-packages/PIL/__pycache__/MspImagePlugin.cpython-310.pyc ADDED
Binary file (3.47 kB)
phi4/lib/python3.10/site-packages/PIL/__pycache__/PaletteFile.cpython-310.pyc ADDED
Binary file (1.54 kB)
phi4/lib/python3.10/site-packages/PIL/__pycache__/PngImagePlugin.cpython-310.pyc ADDED
Binary file (33.8 kB)
phi4/lib/python3.10/site-packages/PIL/__pycache__/QoiImagePlugin.cpython-310.pyc ADDED
Binary file (3.43 kB)
phi4/lib/python3.10/site-packages/PIL/__pycache__/report.cpython-310.pyc ADDED
Binary file (276 Bytes)
phi4/lib/python3.10/site-packages/cffi-1.17.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
phi4/lib/python3.10/site-packages/cffi-1.17.1.dist-info/LICENSE ADDED
@@ -0,0 +1,26 @@
+
+ Except when otherwise stated (look for LICENSE files in directories or
+ information at the beginning of each file) all software and
+ documentation is licensed as follows:
+
+ The MIT License
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or
+ sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
phi4/lib/python3.10/site-packages/cffi-1.17.1.dist-info/METADATA ADDED
@@ -0,0 +1,40 @@
+ Metadata-Version: 2.1
+ Name: cffi
+ Version: 1.17.1
+ Summary: Foreign Function Interface for Python calling C code.
+ Home-page: http://cffi.readthedocs.org
+ Author: Armin Rigo, Maciej Fijalkowski
+ Author-email: python-cffi@googlegroups.com
+ License: MIT
+ Project-URL: Documentation, http://cffi.readthedocs.org/
+ Project-URL: Source Code, https://github.com/python-cffi/cffi
+ Project-URL: Issue Tracker, https://github.com/python-cffi/cffi/issues
+ Project-URL: Changelog, https://cffi.readthedocs.io/en/latest/whatsnew.html
+ Project-URL: Downloads, https://github.com/python-cffi/cffi/releases
+ Project-URL: Contact, https://groups.google.com/forum/#!forum/python-cffi
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: Implementation :: CPython
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
+ Classifier: License :: OSI Approved :: MIT License
+ Requires-Python: >=3.8
+ License-File: LICENSE
+ Requires-Dist: pycparser
+
+
+ CFFI
+ ====
+
+ Foreign Function Interface for Python calling C code.
+ Please see the `Documentation <http://cffi.readthedocs.org/>`_.
+
+ Contact
+ -------
+
+ `Mailing list <https://groups.google.com/forum/#!forum/python-cffi>`_
phi4/lib/python3.10/site-packages/cffi-1.17.1.dist-info/RECORD ADDED
@@ -0,0 +1,49 @@
+ _cffi_backend.cpython-310-x86_64-linux-gnu.so,sha256=pciUVwDoiYkGtuoos7gi5U2TSTeBHVoDkneECMzaObI,985520
+ cffi-1.17.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ cffi-1.17.1.dist-info/LICENSE,sha256=BLgPWwd7vtaICM_rreteNSPyqMmpZJXFh72W3x6sKjM,1294
+ cffi-1.17.1.dist-info/METADATA,sha256=u6nuvP_qPJKu2zvIbi2zkGzVu7KjnnRIYUFyIrOY3j4,1531
+ cffi-1.17.1.dist-info/RECORD,,
+ cffi-1.17.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cffi-1.17.1.dist-info/WHEEL,sha256=AxiTY2sz_GcPOsKDeggQV_FGgAhpyJSKs70WYTq6kog,151
+ cffi-1.17.1.dist-info/entry_points.txt,sha256=y6jTxnyeuLnL-XJcDv8uML3n6wyYiGRg8MTp_QGJ9Ho,75
+ cffi-1.17.1.dist-info/top_level.txt,sha256=rE7WR3rZfNKxWI9-jn6hsHCAl7MDkB-FmuQbxWjFehQ,19
+ cffi/__init__.py,sha256=H6t_ebva6EeHpUuItFLW1gbRp94eZRNJODLaWKdbx1I,513
+ cffi/__pycache__/__init__.cpython-310.pyc,,
+ cffi/__pycache__/_imp_emulation.cpython-310.pyc,,
+ cffi/__pycache__/_shimmed_dist_utils.cpython-310.pyc,,
+ cffi/__pycache__/api.cpython-310.pyc,,
+ cffi/__pycache__/backend_ctypes.cpython-310.pyc,,
+ cffi/__pycache__/cffi_opcode.cpython-310.pyc,,
+ cffi/__pycache__/commontypes.cpython-310.pyc,,
+ cffi/__pycache__/cparser.cpython-310.pyc,,
+ cffi/__pycache__/error.cpython-310.pyc,,
+ cffi/__pycache__/ffiplatform.cpython-310.pyc,,
+ cffi/__pycache__/lock.cpython-310.pyc,,
+ cffi/__pycache__/model.cpython-310.pyc,,
+ cffi/__pycache__/pkgconfig.cpython-310.pyc,,
+ cffi/__pycache__/recompiler.cpython-310.pyc,,
+ cffi/__pycache__/setuptools_ext.cpython-310.pyc,,
+ cffi/__pycache__/vengine_cpy.cpython-310.pyc,,
+ cffi/__pycache__/vengine_gen.cpython-310.pyc,,
+ cffi/__pycache__/verifier.cpython-310.pyc,,
+ cffi/_cffi_errors.h,sha256=zQXt7uR_m8gUW-fI2hJg0KoSkJFwXv8RGUkEDZ177dQ,3908
+ cffi/_cffi_include.h,sha256=Exhmgm9qzHWzWivjfTe0D7Xp4rPUkVxdNuwGhMTMzbw,15055
+ cffi/_embedding.h,sha256=EDKw5QrLvQoe3uosXB3H1xPVTYxsn33eV3A43zsA_Fw,18787
+ cffi/_imp_emulation.py,sha256=RxREG8zAbI2RPGBww90u_5fi8sWdahpdipOoPzkp7C0,2960
+ cffi/_shimmed_dist_utils.py,sha256=Bjj2wm8yZbvFvWEx5AEfmqaqZyZFhYfoyLLQHkXZuao,2230
+ cffi/api.py,sha256=alBv6hZQkjpmZplBphdaRn2lPO9-CORs_M7ixabvZWI,42169
+ cffi/backend_ctypes.py,sha256=h5ZIzLc6BFVXnGyc9xPqZWUS7qGy7yFSDqXe68Sa8z4,42454
+ cffi/cffi_opcode.py,sha256=JDV5l0R0_OadBX_uE7xPPTYtMdmpp8I9UYd6av7aiDU,5731
+ cffi/commontypes.py,sha256=7N6zPtCFlvxXMWhHV08psUjdYIK2XgsN3yo5dgua_v4,2805
+ cffi/cparser.py,sha256=0qI3mEzZSNVcCangoyXOoAcL-RhpQL08eG8798T024s,44789
+ cffi/error.py,sha256=v6xTiS4U0kvDcy4h_BDRo5v39ZQuj-IMRYLv5ETddZs,877
+ cffi/ffiplatform.py,sha256=avxFjdikYGJoEtmJO7ewVmwG_VEVl6EZ_WaNhZYCqv4,3584
+ cffi/lock.py,sha256=l9TTdwMIMpi6jDkJGnQgE9cvTIR7CAntIJr8EGHt3pY,747
+ cffi/model.py,sha256=W30UFQZE73jL5Mx5N81YT77us2W2iJjTm0XYfnwz1cg,21797
+ cffi/parse_c_type.h,sha256=OdwQfwM9ktq6vlCB43exFQmxDBtj2MBNdK8LYl15tjw,5976
+ cffi/pkgconfig.py,sha256=LP1w7vmWvmKwyqLaU1Z243FOWGNQMrgMUZrvgFuOlco,4374
+ cffi/recompiler.py,sha256=sim4Tm7lamt2Jn8uzKN0wMYp6ODByk3g7of47-h9LD4,65367
+ cffi/setuptools_ext.py,sha256=-ebj79lO2_AUH-kRcaja2pKY1Z_5tloGwsJgzK8P3Cc,8871
+ cffi/vengine_cpy.py,sha256=8UagT6ZEOZf6Dju7_CfNulue8CnsHLEzJYhnqUhoF04,43752
+ cffi/vengine_gen.py,sha256=DUlEIrDiVin1Pnhn1sfoamnS5NLqfJcOdhRoeSNeJRg,26939
+ cffi/verifier.py,sha256=oX8jpaohg2Qm3aHcznidAdvrVm5N4sQYG0a3Eo5mIl4,11182