paredeyes commited on
Commit
b8c4a7d
·
verified ·
1 Parent(s): f25167b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. miniCUDA124/bin/nppig64_12.dll +3 -0
  3. miniCUDA124/include/cuda/std/detail/libcxx/include/__mdspan/standard_layout_static_array.h +703 -0
  4. miniCUDA124/include/cuda/std/detail/libcxx/include/__mdspan/static_array.h +325 -0
  5. miniCUDA124/include/cuda/std/detail/libcxx/include/__mdspan/submdspan.h +609 -0
  6. miniCUDA124/include/cuda/std/detail/libcxx/include/__mdspan/type_list.h +137 -0
  7. miniCUDA124/include/cuda/std/detail/libcxx/include/__memory/addressof.h +98 -0
  8. miniCUDA124/include/cuda/std/detail/libcxx/include/__memory/construct_at.h +275 -0
  9. miniCUDA124/include/cuda/std/detail/libcxx/include/__memory/pointer_traits.h +347 -0
  10. miniCUDA124/include/cuda/std/detail/libcxx/include/__memory/voidify.h +38 -0
  11. miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/apply_cv.h +90 -0
  12. miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/make_tuple_types.h +91 -0
  13. miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/sfinae_helpers.h +211 -0
  14. miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/structured_bindings.h +160 -0
  15. miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_element.h +132 -0
  16. miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_indices.h +42 -0
  17. miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_like.h +66 -0
  18. miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_size.h +74 -0
  19. miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_types.h +33 -0
  20. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_const.h +37 -0
  21. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_cv.h +37 -0
  22. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_lvalue_reference.h +61 -0
  23. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_pointer.h +63 -0
  24. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_rvalue_reference.h +62 -0
  25. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_volatile.h +37 -0
  26. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/aligned_storage.h +141 -0
  27. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/aligned_union.h +63 -0
  28. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/alignment_of.h +40 -0
  29. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/apply_cv.h +83 -0
  30. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/can_extract_key.h +64 -0
  31. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/common_reference.h +241 -0
  32. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/common_type.h +127 -0
  33. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/conditional.h +60 -0
  34. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/conjunction.h +66 -0
  35. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/copy_cv.h +62 -0
  36. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/copy_cvref.h +54 -0
  37. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/decay.h +83 -0
  38. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/dependent_type.h +32 -0
  39. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/disjunction.h +74 -0
  40. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/enable_if.h +38 -0
  41. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/extent.h +64 -0
  42. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/has_unique_object_representation.h +46 -0
  43. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/has_virtual_destructor.h +49 -0
  44. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/integral_constant.h +63 -0
  45. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_abstract.h +39 -0
  46. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_aggregate.h +43 -0
  47. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_allocator.h +44 -0
  48. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_arithmetic.h +42 -0
  49. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_array.h +62 -0
  50. miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_assignable.h +78 -0
.gitattributes CHANGED
@@ -89,3 +89,4 @@ miniCUDA124/bin/nppidei64_12.dll filter=lfs diff=lfs merge=lfs -text
89
  miniCUDA124/bin/nppitc64_12.dll filter=lfs diff=lfs merge=lfs -text
90
  miniCUDA124/bin/nvcc.exe filter=lfs diff=lfs merge=lfs -text
91
  miniCUDA124/bin/nppist64_12.dll filter=lfs diff=lfs merge=lfs -text
 
 
89
  miniCUDA124/bin/nppitc64_12.dll filter=lfs diff=lfs merge=lfs -text
90
  miniCUDA124/bin/nvcc.exe filter=lfs diff=lfs merge=lfs -text
91
  miniCUDA124/bin/nppist64_12.dll filter=lfs diff=lfs merge=lfs -text
92
+ miniCUDA124/bin/nppig64_12.dll filter=lfs diff=lfs merge=lfs -text
miniCUDA124/bin/nppig64_12.dll ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d2f6940b8fa725b26b31aab185472a2b218e5563e01b2bcf020f6711ad43754
3
+ size 39356416
miniCUDA124/include/cuda/std/detail/libcxx/include/__mdspan/standard_layout_static_array.h ADDED
@@ -0,0 +1,703 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_STANDARD_LAYOUT_STATIC_ARRAY_HPP
45
+ #define _LIBCUDACXX___MDSPAN_STANDARD_LAYOUT_STATIC_ARRAY_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #include "../__mdspan/compressed_pair.h"
52
+ #include "../__mdspan/dynamic_extent.h"
53
+ #include "../__mdspan/macros.h"
54
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
55
+ #include "../__mdspan/no_unique_address.h"
56
+ #endif
57
+ #include "../__type_traits/enable_if.h"
58
+ #include "../__utility/integer_sequence.h"
59
+ #include "../array"
60
+ #include "../cstddef"
61
+ #include "../span"
62
+
63
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
64
+ # pragma GCC system_header
65
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
66
+ # pragma clang system_header
67
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
68
+ # pragma system_header
69
+ #endif // no system header
70
+
71
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
72
+
73
+ #if _LIBCUDACXX_STD_VER > 11
74
+
75
+ namespace __detail {
76
+
77
+ //==============================================================================
78
+
79
+ struct __construct_psa_from_dynamic_exts_values_tag_t {};
80
+ _LIBCUDACXX_CPO_ACCESSIBILITY __construct_psa_from_dynamic_exts_values_tag_t __construct_psa_from_dynamic_exts_values_tag;
81
+
82
+ struct __construct_psa_from_all_exts_values_tag_t {};
83
+ _LIBCUDACXX_CPO_ACCESSIBILITY __construct_psa_from_all_exts_values_tag_t __construct_psa_from_all_exts_values_tag;
84
+
85
+ struct __construct_psa_from_all_exts_array_tag_t {};
86
+ template <size_t _Np = 0>
87
+ struct __construct_psa_from_dynamic_exts_array_tag_t {};
88
+
89
+ //==============================================================================
90
+
91
+ template <size_t _Ip, class _Tp> using __repeated_with_idxs = _Tp;
92
+
93
+ //==============================================================================
94
+
95
+ #if __MDSPAN_PRESERVE_STANDARD_LAYOUT
96
+
97
+ /**
98
+ * PSA = "partially static array"
99
+ *
100
+ * @tparam _Tp
101
+ * @tparam _ValsSeq
102
+ * @tparam __sentinal
103
+ */
104
+ template <class _Tag, class _Tp, class _static_t, class _ValsSeq, _static_t __sentinal = static_cast<_static_t>(dynamic_extent),
105
+ class _IdxsSeq = _CUDA_VSTD::make_index_sequence<_ValsSeq::size()>>
106
+ struct __standard_layout_psa;
107
+
108
+ //==============================================================================
109
+ // Static case
110
+ template <class _Tag, class _Tp, class _static_t, _static_t __value, _static_t... __values_or_sentinals,
111
+ _static_t __sentinal, size_t _Idx, size_t... _Idxs>
112
+ struct __standard_layout_psa<
113
+ _Tag, _Tp, _static_t, _CUDA_VSTD::integer_sequence<_static_t, __value, __values_or_sentinals...>,
114
+ __sentinal, _CUDA_VSTD::integer_sequence<size_t, _Idx, _Idxs...>>
115
+ #ifdef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
116
+ : private __no_unique_address_emulation<__standard_layout_psa<
117
+ _Tag, _Tp, _static_t, _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>, __sentinal,
118
+ _CUDA_VSTD::integer_sequence<size_t, _Idxs...>>>
119
+ #endif
120
+ {
121
+
122
+ //--------------------------------------------------------------------------
123
+
124
+ using __next_t =
125
+ __standard_layout_psa<_Tag, _Tp, _static_t,
126
+ _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>,
127
+ __sentinal, _CUDA_VSTD::integer_sequence<size_t, _Idxs...>>;
128
+
129
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
130
+ _LIBCUDACXX_NO_UNIQUE_ADDRESS __next_t __next_;
131
+ #else
132
+ using __base_t = __no_unique_address_emulation<__next_t>;
133
+ #endif
134
+
135
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr __next_t &__next() noexcept {
136
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
137
+ return __next_;
138
+ #else
139
+ return this->__base_t::__ref();
140
+ #endif
141
+ }
142
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr __next_t const &__next() const noexcept {
143
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
144
+ return __next_;
145
+ #else
146
+ return this->__base_t::__ref();
147
+ #endif
148
+ }
149
+
150
+ static constexpr auto __size = sizeof...(_Idxs) + 1;
151
+ static constexpr auto __size_dynamic = __next_t::__size_dynamic;
152
+
153
+ //--------------------------------------------------------------------------
154
+
155
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
156
+ constexpr __standard_layout_psa() noexcept = default;
157
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
158
+ constexpr __standard_layout_psa(__standard_layout_psa const &) noexcept =
159
+ default;
160
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
161
+ constexpr __standard_layout_psa(__standard_layout_psa &&) noexcept = default;
162
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
163
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa &
164
+ operator=(__standard_layout_psa const &) noexcept = default;
165
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
166
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa &
167
+ operator=(__standard_layout_psa &&) noexcept = default;
168
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
169
+ ~__standard_layout_psa() noexcept = default;
170
+
171
+ //--------------------------------------------------------------------------
172
+
173
+ __MDSPAN_INLINE_FUNCTION
174
+ constexpr __standard_layout_psa(
175
+ __construct_psa_from_all_exts_values_tag_t, _Tp const & /*__val*/,
176
+ __repeated_with_idxs<_Idxs, _Tp> const &... __vals) noexcept
177
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
178
+ : __next_{
179
+ #else
180
+ : __base_t(__base_t{__next_t(
181
+ #endif
182
+ __construct_psa_from_all_exts_values_tag, __vals...
183
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
184
+ }
185
+ #else
186
+ )})
187
+ #endif
188
+ { }
189
+
190
+ template <class... _Ts>
191
+ __MDSPAN_INLINE_FUNCTION constexpr __standard_layout_psa(
192
+ __construct_psa_from_dynamic_exts_values_tag_t,
193
+ _Ts const &... __vals) noexcept
194
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
195
+ : __next_{
196
+ #else
197
+ : __base_t(__base_t{__next_t(
198
+ #endif
199
+ __construct_psa_from_dynamic_exts_values_tag, __vals...
200
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
201
+ }
202
+ #else
203
+ )})
204
+ #endif
205
+ { }
206
+
207
+ template <class _Up, size_t _Np>
208
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
209
+ array<_Up, _Np> const &__vals) noexcept
210
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
211
+ : __next_{
212
+ #else
213
+ : __base_t(__base_t{__next_t(
214
+ #endif
215
+ __vals
216
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
217
+ }
218
+ #else
219
+ )})
220
+ #endif
221
+ { }
222
+
223
+ template <class _Up, size_t _NStatic>
224
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
225
+ __construct_psa_from_all_exts_array_tag_t const & __tag,
226
+ array<_Up, _NStatic> const &__vals) noexcept
227
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
228
+ : __next_{
229
+ #else
230
+ : __base_t(__base_t{__next_t(
231
+ #endif
232
+ __tag, __vals
233
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
234
+ }
235
+ #else
236
+ )})
237
+ #endif
238
+ { }
239
+
240
+ template <class _Up, size_t _IDynamic, size_t _NDynamic>
241
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
242
+ __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic> __tag,
243
+ array<_Up, _NDynamic> const &__vals) noexcept
244
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
245
+ : __next_{
246
+ #else
247
+ : __base_t(__base_t{__next_t(
248
+ #endif
249
+ __tag, __vals
250
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
251
+ }
252
+ #else
253
+ )})
254
+ #endif
255
+ { }
256
+
257
+ template <class _Up, size_t _Np>
258
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
259
+ _CUDA_VSTD::span<_Up, _Np> const &__vals) noexcept
260
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
261
+ : __next_{
262
+ #else
263
+ : __base_t(__base_t{__next_t(
264
+ #endif
265
+ __vals
266
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
267
+ }
268
+ #else
269
+ )})
270
+ #endif
271
+ { }
272
+
273
+ template <class _Up, size_t _NStatic>
274
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
275
+ __construct_psa_from_all_exts_array_tag_t const & __tag,
276
+ _CUDA_VSTD::span<_Up, _NStatic> const &__vals) noexcept
277
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
278
+ : __next_{
279
+ #else
280
+ : __base_t(__base_t{__next_t(
281
+ #endif
282
+ __tag, __vals
283
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
284
+ }
285
+ #else
286
+ )})
287
+ #endif
288
+ { }
289
+
290
+ template <class _Up, size_t _IDynamic, size_t _NDynamic>
291
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
292
+ __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic> __tag,
293
+ _CUDA_VSTD::span<_Up, _NDynamic> const &__vals) noexcept
294
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
295
+ : __next_{
296
+ #else
297
+ : __base_t(__base_t{__next_t(
298
+ #endif
299
+ __tag, __vals
300
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
301
+ }
302
+ #else
303
+ )})
304
+ #endif
305
+ { }
306
+
307
+ template <class _UTag, class _Up, class _static_U, class _UValsSeq, _static_U __u_sentinal,
308
+ class _IdxsSeq>
309
+ __MDSPAN_INLINE_FUNCTION constexpr __standard_layout_psa(
310
+ __standard_layout_psa<_UTag, _Up, _static_U, _UValsSeq, __u_sentinal, _IdxsSeq> const
311
+ &__rhs) noexcept
312
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
313
+ : __next_{
314
+ #else
315
+ : __base_t(__base_t{__next_t(
316
+ #endif
317
+ __rhs.__next()
318
+ #ifndef _LIBCUDACXX_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS
319
+ }
320
+ #else
321
+ )})
322
+ #endif
323
+ { }
324
+
325
+ //--------------------------------------------------------------------------
326
+
327
+ // See https://godbolt.org/z/_KSDNX for a summary-by-example of why this is
328
+ // necessary. We're using inheritance here instead of an alias template
329
+ // because we have to deduce __values_or_sentinals in several places, and
330
+ // alias templates don't permit that in this context.
331
+ __MDSPAN_FORCE_INLINE_FUNCTION
332
+ constexpr __standard_layout_psa const &__enable_psa_conversion() const
333
+ noexcept {
334
+ return *this;
335
+ }
336
+
337
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip != _Idx, int> = 0>
338
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get_n() const noexcept {
339
+ return __next().template __get_n<_Ip>();
340
+ }
341
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip == _Idx, int> = 1>
342
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get_n() const noexcept {
343
+ return __value;
344
+ }
345
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip != _Idx, int> = 0>
346
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr void
347
+ __set_n(_Tp const &__rhs) noexcept {
348
+ __next().__set_value(__rhs);
349
+ }
350
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip == _Idx, int> = 1>
351
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr void
352
+ __set_n(_Tp const &) noexcept {
353
+ // Don't assert here because that would break constexpr. This better
354
+ // not change anything, though
355
+ }
356
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip == _Idx, _static_t> = __sentinal>
357
+ __MDSPAN_FORCE_INLINE_FUNCTION static constexpr _static_t __get_static_n() noexcept {
358
+ return __value;
359
+ }
360
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip != _Idx, _static_t> __default = __sentinal>
361
+ __MDSPAN_FORCE_INLINE_FUNCTION static constexpr _static_t __get_static_n() noexcept {
362
+ return __next_t::template __get_static_n<_Ip, __default>();
363
+ }
364
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get(size_t __n) const noexcept {
365
+ return __value * (_Tp(_Idx == __n)) + __next().__get(__n);
366
+ }
367
+
368
+ //--------------------------------------------------------------------------
369
+ };
370
+
371
+ //==============================================================================
372
+
373
+ // Dynamic case, __next_t may or may not be empty
374
+ template <class _Tag, class _Tp, class _static_t, _static_t __sentinal, _static_t... __values_or_sentinals,
375
+ size_t _Idx, size_t... _Idxs>
376
+ struct __standard_layout_psa<
377
+ _Tag, _Tp, _static_t, _CUDA_VSTD::integer_sequence<_static_t, __sentinal, __values_or_sentinals...>,
378
+ __sentinal, _CUDA_VSTD::integer_sequence<size_t, _Idx, _Idxs...>> {
379
+ //--------------------------------------------------------------------------
380
+
381
+ using __next_t =
382
+ __standard_layout_psa<_Tag, _Tp, _static_t,
383
+ _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>,
384
+ __sentinal, _CUDA_VSTD::integer_sequence<size_t, _Idxs...>>;
385
+
386
+ using __value_pair_t = __compressed_pair<_Tp, __next_t>;
387
+ __value_pair_t __value_pair;
388
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr __next_t &__next() noexcept {
389
+ return __value_pair.__second();
390
+ }
391
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr __next_t const &__next() const noexcept {
392
+ return __value_pair.__second();
393
+ }
394
+
395
+ static constexpr auto __size = sizeof...(_Idxs) + 1;
396
+ static constexpr auto __size_dynamic = 1 + __next_t::__size_dynamic;
397
+
398
+ //--------------------------------------------------------------------------
399
+
400
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
401
+ constexpr __standard_layout_psa() noexcept = default;
402
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
403
+ constexpr __standard_layout_psa(__standard_layout_psa const &) noexcept =
404
+ default;
405
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
406
+ constexpr __standard_layout_psa(__standard_layout_psa &&) noexcept = default;
407
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
408
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa &
409
+ operator=(__standard_layout_psa const &) noexcept = default;
410
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
411
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa &
412
+ operator=(__standard_layout_psa &&) noexcept = default;
413
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
414
+ ~__standard_layout_psa() noexcept = default;
415
+
416
+ //--------------------------------------------------------------------------
417
+
418
+ __MDSPAN_INLINE_FUNCTION
419
+ constexpr __standard_layout_psa(
420
+ __construct_psa_from_all_exts_values_tag_t, _Tp const &__val,
421
+ __repeated_with_idxs<_Idxs, _Tp> const &... __vals) noexcept
422
+ : __value_pair(__val,
423
+ __next_t(__construct_psa_from_all_exts_values_tag,
424
+ __vals...)) {}
425
+
426
+ template <class... _Ts>
427
+ __MDSPAN_INLINE_FUNCTION constexpr __standard_layout_psa(
428
+ __construct_psa_from_dynamic_exts_values_tag_t, _Tp const &__val,
429
+ _Ts const &... __vals) noexcept
430
+ : __value_pair(__val,
431
+ __next_t(__construct_psa_from_dynamic_exts_values_tag,
432
+ __vals...)) {}
433
+
434
+ template <class _Up, size_t _Np>
435
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
436
+ array<_Up, _Np> const &__vals) noexcept
437
+ : __value_pair(_CUDA_VSTD::get<_Idx>(__vals), __vals) {}
438
+
439
+ template <class _Up, size_t _NStatic>
440
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
441
+ __construct_psa_from_all_exts_array_tag_t __tag,
442
+ array<_Up, _NStatic> const &__vals) noexcept
443
+ : __value_pair(
444
+ _CUDA_VSTD::get<_Idx>(__vals),
445
+ __next_t(__tag,
446
+ __vals)) {}
447
+
448
+ template <class _Up, size_t _IDynamic, size_t _NDynamic>
449
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
450
+ __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic>,
451
+ array<_Up, _NDynamic> const &__vals) noexcept
452
+ : __value_pair(
453
+ _CUDA_VSTD::get<_IDynamic>(__vals),
454
+ __next_t(__construct_psa_from_dynamic_exts_array_tag_t<_IDynamic + 1>{},
455
+ __vals)) {}
456
+
457
+ template <class _Up, size_t _Np>
458
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
459
+ _CUDA_VSTD::span<_Up, _Np> const &__vals) noexcept
460
+ : __value_pair(__vals[_Idx], __vals) {}
461
+
462
+ template <class _Up, size_t _NStatic>
463
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
464
+ __construct_psa_from_all_exts_array_tag_t __tag,
465
+ _CUDA_VSTD::span<_Up, _NStatic> const &__vals) noexcept
466
+ : __value_pair(
467
+ __vals[_Idx],
468
+ __next_t(__tag,
469
+ __vals)) {}
470
+
471
+ template <class _Up, size_t _IDynamic, size_t _NDynamic>
472
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
473
+ __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic>,
474
+ _CUDA_VSTD::span<_Up, _NDynamic> const &__vals) noexcept
475
+ : __value_pair(
476
+ __vals[_IDynamic],
477
+ __next_t(__construct_psa_from_dynamic_exts_array_tag_t<_IDynamic + 1>{},
478
+ __vals)) {}
479
+
480
+ template <class _UTag, class _Up, class _static_U, class _UValsSeq, _static_U __u_sentinal,
481
+ class _UIdxsSeq>
482
+ __MDSPAN_INLINE_FUNCTION constexpr __standard_layout_psa(
483
+ __standard_layout_psa<_UTag, _Up, _static_U, _UValsSeq, __u_sentinal, _UIdxsSeq> const
484
+ &__rhs) noexcept
485
+ : __value_pair(__rhs.template __get_n<_Idx>(), __rhs.__next()) {}
486
+
487
+ //--------------------------------------------------------------------------
488
+
489
+ // See comment in the previous partial specialization for why this is
490
+ // necessary. Or just trust me that it's messy.
491
+ __MDSPAN_FORCE_INLINE_FUNCTION
492
+ constexpr __standard_layout_psa const &__enable_psa_conversion() const
493
+ noexcept {
494
+ return *this;
495
+ }
496
+
497
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip != _Idx, int> = 0>
498
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get_n() const noexcept {
499
+ return __next().template __get_n<_Ip>();
500
+ }
501
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip == _Idx, int> = 1>
502
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get_n() const noexcept {
503
+ return __value_pair.__first();
504
+ }
505
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip != _Idx, int> = 0>
506
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr void
507
+ __set_n(_Tp const &__rhs) noexcept {
508
+ __next().__set_value(__rhs);
509
+ }
510
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip == _Idx, int> = 1>
511
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr void
512
+ __set_n(_Tp const &__rhs) noexcept {
513
+ __value_pair.__first() = __rhs;
514
+ }
515
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip == _Idx, _static_t> __default = __sentinal>
516
+ __MDSPAN_FORCE_INLINE_FUNCTION static constexpr _static_t __get_static_n() noexcept {
517
+ return __default;
518
+ }
519
+ template <size_t _Ip, _CUDA_VSTD::enable_if_t<_Ip != _Idx, _static_t> __default = __sentinal>
520
+ __MDSPAN_FORCE_INLINE_FUNCTION static constexpr _static_t __get_static_n() noexcept {
521
+ return __next_t::template __get_static_n<_Ip, __default>();
522
+ }
523
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get(size_t __n) const noexcept {
524
+ return __value_pair.__first() * (_Tp(_Idx == __n)) + __next().__get(__n);
525
+ }
526
+
527
+ //--------------------------------------------------------------------------
528
+ };
529
+
530
+ // empty/terminal case
531
+ template <class _Tag, class _Tp, class _static_t, _static_t __sentinal>
532
+ struct __standard_layout_psa<_Tag, _Tp, _static_t, _CUDA_VSTD::integer_sequence<_static_t>, __sentinal,
533
+ _CUDA_VSTD::integer_sequence<size_t>> {
534
+ //--------------------------------------------------------------------------
535
+
536
+ static constexpr auto __size = 0;
537
+ static constexpr auto __size_dynamic = 0;
538
+
539
+ //--------------------------------------------------------------------------
540
+
541
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
542
+ constexpr __standard_layout_psa() noexcept = default;
543
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
544
+ constexpr __standard_layout_psa(__standard_layout_psa const &) noexcept = default;
545
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
546
+ constexpr __standard_layout_psa(__standard_layout_psa &&) noexcept = default;
547
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
548
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa &
549
+ operator=(__standard_layout_psa const &) noexcept = default;
550
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
551
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa &
552
+ operator=(__standard_layout_psa &&) noexcept = default;
553
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
554
+ ~__standard_layout_psa() noexcept = default;
555
+
556
+ __MDSPAN_INLINE_FUNCTION
557
+ constexpr __standard_layout_psa(
558
+ __construct_psa_from_all_exts_values_tag_t) noexcept {}
559
+
560
+ template <class... _Ts>
561
+ __MDSPAN_INLINE_FUNCTION constexpr __standard_layout_psa(
562
+ __construct_psa_from_dynamic_exts_values_tag_t) noexcept {}
563
+
564
+ template <class _Up, size_t _Np>
565
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
566
+ array<_Up, _Np> const &) noexcept {}
567
+
568
+ template <class _Up, size_t _NStatic>
569
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
570
+ __construct_psa_from_all_exts_array_tag_t,
571
+ array<_Up, _NStatic> const &) noexcept {}
572
+
573
+ template <class _Up, size_t _IDynamic, size_t _NDynamic>
574
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
575
+ __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic>,
576
+ array<_Up, _NDynamic> const &) noexcept {}
577
+
578
+ template <class _Up, size_t _Np>
579
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
580
+ _CUDA_VSTD::span<_Up, _Np> const &) noexcept {}
581
+
582
+ template <class _Up, size_t _NStatic>
583
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
584
+ __construct_psa_from_all_exts_array_tag_t,
585
+ _CUDA_VSTD::span<_Up, _NStatic> const &) noexcept {}
586
+
587
+ template <class _Up, size_t _IDynamic, size_t _NDynamic>
588
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __standard_layout_psa(
589
+ __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic>,
590
+ _CUDA_VSTD::span<_Up, _NDynamic> const &) noexcept {}
591
+
592
+ template <class _UTag, class _Up, class _static_U, class _UValsSeq, _static_U __u_sentinal,
593
+ class _UIdxsSeq>
594
+ __MDSPAN_INLINE_FUNCTION constexpr __standard_layout_psa(
595
+ __standard_layout_psa<_UTag, _Up, _static_U, _UValsSeq, __u_sentinal, _UIdxsSeq> const&) noexcept {}
596
+
597
+ // See comment in the previous partial specialization for why this is
598
+ // necessary. Or just trust me that it's messy.
599
+ __MDSPAN_FORCE_INLINE_FUNCTION
600
+ constexpr __standard_layout_psa const &__enable_psa_conversion() const
601
+ noexcept {
602
+ return *this;
603
+ }
604
+
605
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get(size_t /*n*/) const noexcept {
606
+ return 0;
607
+ }
608
+ };
609
+
610
+ // Same thing, but with a disambiguator so that same-base issues doesn't cause
611
+ // a loss of standard-layout-ness.
612
+ template <class _Tag, class T, class _static_t, _static_t... __values_or_sentinals>
613
+ struct __partially_static_sizes_tagged
614
+ : __standard_layout_psa<
615
+ _Tag, T, _static_t,
616
+ _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>> {
617
+ using __tag_t = _Tag;
618
+ using __psa_impl_t = __standard_layout_psa<
619
+ _Tag, T, _static_t, _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>>;
620
+ #if defined(_LIBCUDACXX_COMPILER_NVRTC) \
621
+ || defined(_LIBCUDACXX_CUDACC_BELOW_11_3)
622
+ template<class... _Args, __enable_if_t<_LIBCUDACXX_TRAIT(is_constructible, __psa_impl_t, _Args...), int> = 0>
623
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr
624
+ __partially_static_sizes_tagged(_Args&&... __args) noexcept(noexcept(__psa_impl_t(_CUDA_VSTD::declval<_Args>()...)))
625
+ : __psa_impl_t(_CUDA_VSTD::forward<_Args>(__args)...)
626
+ {}
627
+ #else // ^^^ _LIBCUDACXX_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_LIBCUDACXX_COMPILER_NVRTC || nvcc >= 11.3 vvv
628
+ using __psa_impl_t::__psa_impl_t;
629
+ #endif // !_LIBCUDACXX_COMPILER_NVRTC || nvcc >= 11.3
630
+ #ifdef __MDSPAN_DEFAULTED_CONSTRUCTORS_INHERITANCE_WORKAROUND
631
+ __MDSPAN_INLINE_FUNCTION
632
+ #endif
633
+ constexpr __partially_static_sizes_tagged() noexcept
634
+ #ifdef __MDSPAN_DEFAULTED_CONSTRUCTORS_INHERITANCE_WORKAROUND
635
+ : __psa_impl_t() { }
636
+ #else
637
+ = default;
638
+ #endif
639
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
640
+ constexpr __partially_static_sizes_tagged(
641
+ __partially_static_sizes_tagged const &) noexcept = default;
642
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
643
+ constexpr __partially_static_sizes_tagged(
644
+ __partially_static_sizes_tagged &&) noexcept = default;
645
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
646
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __partially_static_sizes_tagged &
647
+ operator=(__partially_static_sizes_tagged const &) noexcept = default;
648
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
649
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __partially_static_sizes_tagged &
650
+ operator=(__partially_static_sizes_tagged &&) noexcept = default;
651
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
652
+ ~__partially_static_sizes_tagged() noexcept = default;
653
+
654
+ template <class _UTag>
655
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr explicit __partially_static_sizes_tagged(
656
+ __partially_static_sizes_tagged<_UTag, T, _static_t, __values_or_sentinals...> const& __vals
657
+ ) noexcept : __psa_impl_t(__vals.__enable_psa_conversion()) { }
658
+ };
659
+
660
+ struct __no_tag {};
661
+ template <class T, class _static_t, _static_t... __values_or_sentinals>
662
+ struct __partially_static_sizes
663
+ : __partially_static_sizes_tagged<__no_tag, T, _static_t, __values_or_sentinals...> {
664
+ private:
665
+ using __base_t =
666
+ __partially_static_sizes_tagged<__no_tag, T, _static_t, __values_or_sentinals...>;
667
+ template <class _UTag>
668
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr __partially_static_sizes(
669
+ __partially_static_sizes_tagged<_UTag, T, _static_t, __values_or_sentinals...>&& __vals
670
+ ) noexcept : __base_t(_CUDA_VSTD::move(__vals)) { }
671
+ public:
672
+ #if defined(_LIBCUDACXX_COMPILER_NVRTC) \
673
+ || defined(_LIBCUDACXX_CUDACC_BELOW_11_3)
674
+ template<class... _Args, __enable_if_t<_LIBCUDACXX_TRAIT(is_constructible, __base_t, _Args...), int> = 0>
675
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr
676
+ __partially_static_sizes(_Args&&... __args) noexcept(noexcept(__base_t(_CUDA_VSTD::declval<_Args>()...)))
677
+ : __base_t(_CUDA_VSTD::forward<_Args>(__args)...)
678
+ {}
679
+ #else // ^^^ _LIBCUDACXX_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_LIBCUDACXX_COMPILER_NVRTC || nvcc >= 11.3 vvv
680
+ using __base_t::__base_t;
681
+ #endif // !_LIBCUDACXX_COMPILER_NVRTC || nvcc >= 11.3
682
+
683
+ #ifdef __MDSPAN_DEFAULTED_CONSTRUCTORS_INHERITANCE_WORKAROUND
684
+ __MDSPAN_INLINE_FUNCTION
685
+ constexpr __partially_static_sizes() noexcept : __base_t() { }
686
+ #endif
687
+ template <class _UTag>
688
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr __partially_static_sizes_tagged<
689
+ _UTag, T, _static_t, __values_or_sentinals...>
690
+ __with_tag() const noexcept {
691
+ return __partially_static_sizes_tagged<_UTag, T, _static_t, __values_or_sentinals...>(*this);
692
+ }
693
+ };
694
+
695
+ #endif // __MDSPAN_PRESERVE_STATIC_LAYOUT
696
+
697
+ } // end namespace __detail
698
+
699
+ #endif // _LIBCUDACXX_STD_VER > 11
700
+
701
+ _LIBCUDACXX_END_NAMESPACE_STD
702
+
703
+ #endif // _LIBCUDACXX___MDSPAN_STANDARD_LAYOUT_STATIC_ARRAY_HPP
miniCUDA124/include/cuda/std/detail/libcxx/include/__mdspan/static_array.h ADDED
@@ -0,0 +1,325 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_STATIC_ARRAY_HPP
45
+ #define _LIBCUDACXX___MDSPAN_STATIC_ARRAY_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #include "../__fwd/span.h" // dynamic_extent
52
+ #include "../__mdspan/dynamic_extent.h"
53
+ #include "../__mdspan/macros.h"
54
+ #include "../__mdspan/maybe_static_value.h"
55
+ #include "../__mdspan/standard_layout_static_array.h"
56
+ #include "../__mdspan/type_list.h"
57
+ #include "../__utility/integer_sequence.h"
58
+ #include "../array"
59
+ #include "../cstddef"
60
+
61
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
62
+ # pragma GCC system_header
63
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
64
+ # pragma clang system_header
65
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
66
+ # pragma system_header
67
+ #endif // no system header
68
+
69
+ #if !__MDSPAN_PRESERVE_STANDARD_LAYOUT
70
+
71
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
72
+
73
+ #if _LIBCUDACXX_STD_VER > 11
74
+
75
+ namespace __detail {
76
+
77
+ //==============================================================================
78
+
79
+ template <class _Tp, _Tp _Val, bool _Mask> struct __mask_element {};
80
+
81
+ template <class _Tp, _Tp... _Result>
82
+ struct __mask_sequence_assign_op {
83
+ template <_Tp _V>
84
+ __mask_sequence_assign_op<_Tp, _Result..., _V>
85
+ operator=(__mask_element<_Tp, _V, true>&&);
86
+ template <_Tp _V>
87
+ __mask_sequence_assign_op<_Tp, _Result...>
88
+ operator=(__mask_element<_Tp, _V, false>&&);
89
+ using __result = _CUDA_VSTD::integer_sequence<_Tp, _Result...>;
90
+ };
91
+
92
+ template <class _Seq, class _Mask>
93
+ struct __mask_sequence;
94
+
95
+ template <class _Tp, _Tp... _Vals, bool... _Masks>
96
+ struct __mask_sequence<_CUDA_VSTD::integer_sequence<_Tp, _Vals...>, _CUDA_VSTD::integer_sequence<bool, _Masks...>>
97
+ {
98
+ using type = typename decltype(
99
+ __MDSPAN_FOLD_ASSIGN_LEFT(
100
+ __mask_sequence_assign_op<_Tp>{}, /* = ... = */ __mask_element<_Tp, _Vals, _Masks>{}
101
+ )
102
+ )::__result;
103
+ };
104
+
105
+ //==============================================================================
106
+
107
+ template <class _Tp, class _static_t, class _Vals, _static_t __sentinal,
108
+ class _Idxs, class _IdxsDynamic, class _IdxsDynamicIdxs>
109
+ class __partially_static_array_impl;
110
+
111
+ template <
112
+ class _Tp, class _static_t,
113
+ _static_t... __values_or_sentinals, _static_t __sentinal,
114
+ size_t... _Idxs,
115
+ size_t... _IdxsDynamic,
116
+ size_t... _IdxsDynamicIdxs
117
+ >
118
+ class __partially_static_array_impl<
119
+ _Tp,
120
+ _static_t,
121
+ _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>,
122
+ __sentinal,
123
+ _CUDA_VSTD::integer_sequence<size_t, _Idxs...>,
124
+ _CUDA_VSTD::integer_sequence<size_t, _IdxsDynamic...>,
125
+ _CUDA_VSTD::integer_sequence<size_t, _IdxsDynamicIdxs...>
126
+ >
127
+ : private __maybe_static_value<_Tp, _static_t, __values_or_sentinals, __sentinal,
128
+ _Idxs>... {
129
+ private:
130
+
131
+ template <size_t _Np>
132
+ using __base_n = typename __type_at<_Np,
133
+ __type_list<__maybe_static_value<_Tp, _static_t, __values_or_sentinals, __sentinal, _Idxs>...>
134
+ >::type;
135
+
136
+ public:
137
+
138
+ static constexpr auto __size = sizeof...(_Idxs);
139
+ static constexpr auto __size_dynamic =
140
+ __MDSPAN_FOLD_PLUS_RIGHT(static_cast<int>((__values_or_sentinals == __sentinal)), /* + ... + */ 0);
141
+
142
+ //--------------------------------------------------------------------------
143
+
144
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
145
+ constexpr __partially_static_array_impl() = default;
146
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
147
+ constexpr __partially_static_array_impl(
148
+ __partially_static_array_impl const &) noexcept = default;
149
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
150
+ constexpr __partially_static_array_impl(
151
+ __partially_static_array_impl &&) noexcept = default;
152
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
153
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __partially_static_array_impl &
154
+ operator=(__partially_static_array_impl const &) noexcept = default;
155
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
156
+ __MDSPAN_CONSTEXPR_14_DEFAULTED __partially_static_array_impl &
157
+ operator=(__partially_static_array_impl &&) noexcept = default;
158
+ __MDSPAN_INLINE_FUNCTION_DEFAULTED
159
+ ~__partially_static_array_impl() noexcept = default;
160
+
161
+ __MDSPAN_INLINE_FUNCTION
162
+ constexpr __partially_static_array_impl(
163
+ __construct_psa_from_all_exts_values_tag_t,
164
+ __repeated_with_idxs<_Idxs, _Tp> const &... __vals) noexcept
165
+ : __base_n<_Idxs>(__base_n<_Idxs>{{__vals}})... {}
166
+
167
+ __MDSPAN_INLINE_FUNCTION
168
+ constexpr __partially_static_array_impl(
169
+ __construct_psa_from_dynamic_exts_values_tag_t,
170
+ __repeated_with_idxs<_IdxsDynamicIdxs, _Tp> const &... __vals) noexcept
171
+ : __base_n<_IdxsDynamic>(__base_n<_IdxsDynamic>{{__vals}})... {}
172
+
173
+ __MDSPAN_INLINE_FUNCTION constexpr explicit __partially_static_array_impl(
174
+ _CUDA_VSTD::array<_Tp, sizeof...(_Idxs)> const& __vals) noexcept
175
+ : __partially_static_array_impl(
176
+ __construct_psa_from_all_exts_values_tag,
177
+ _CUDA_VSTD::get<_Idxs>(__vals)...) {}
178
+
179
+ // clang-format off
180
+ __MDSPAN_FUNCTION_REQUIRES(
181
+ (__MDSPAN_INLINE_FUNCTION constexpr explicit),
182
+ __partially_static_array_impl,
183
+ (_CUDA_VSTD::array<_Tp, __size_dynamic> const &__vals), noexcept,
184
+ /* requires */
185
+ (sizeof...(_Idxs) != __size_dynamic)
186
+ ): __partially_static_array_impl(
187
+ __construct_psa_from_dynamic_exts_values_tag,
188
+ _CUDA_VSTD::get<_IdxsDynamicIdxs>(__vals)...) {}
189
+ // clang-format on
190
+
191
+ template <class _Up, class _static_u, class _UValsSeq, _static_u __u_sentinal, class _UIdxsSeq,
192
+ class _UIdxsDynamicSeq, class _UIdxsDynamicIdxsSeq>
193
+ __MDSPAN_INLINE_FUNCTION constexpr __partially_static_array_impl(
194
+ __partially_static_array_impl<
195
+ _Up, _static_u, _UValsSeq, __u_sentinal, _UIdxsSeq,
196
+ _UIdxsDynamicSeq, _UIdxsDynamicIdxsSeq> const &__rhs) noexcept
197
+ : __partially_static_array_impl(
198
+ __construct_psa_from_all_exts_values_tag,
199
+ __rhs.template __get_n<_Idxs>()...) {}
200
+
201
+ //--------------------------------------------------------------------------
202
+
203
+ // See comment in the previous partial specialization for why this is
204
+ // necessary. Or just trust me that it's messy.
205
+ __MDSPAN_FORCE_INLINE_FUNCTION
206
+ constexpr __partially_static_array_impl const &__enable_psa_conversion() const
207
+ noexcept {
208
+ return *this;
209
+ }
210
+
211
+ template <size_t _Ip>
212
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get_n() const noexcept {
213
+ return static_cast<__base_n<_Ip> const*>(this)->__value();
214
+ }
215
+
216
+ template <class _Up, size_t _Ip>
217
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr void __set_n(_Up&& __rhs) noexcept {
218
+ static_cast<__base_n<_Ip>*>(this)->__set_value((_Up&&)__rhs);
219
+ }
220
+
221
+ template <size_t _Ip, _static_t __default = __sentinal>
222
+ __MDSPAN_FORCE_INLINE_FUNCTION static constexpr _static_t
223
+ __get_static_n() noexcept {
224
+ return __base_n<_Ip>::__static_value == __sentinal ?
225
+ __default : __base_n<_Ip>::__static_value;
226
+ }
227
+
228
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp
229
+ __get(size_t __n) const noexcept {
230
+ return __MDSPAN_FOLD_PLUS_RIGHT(
231
+ (_Tp(_Idxs == __n) * __get_n<_Idxs>()), /* + ... + */ _Tp(0)
232
+ );
233
+ }
234
+
235
+ };
236
+
237
+ //==============================================================================
238
+
239
+ template <class _Tp, class _static_t, class _ValSeq, _static_t __sentinal, class _Idxs = _CUDA_VSTD::make_index_sequence<_ValSeq::size()>>
240
+ struct __partially_static_array_impl_maker;
241
+
242
+ template <
243
+ class _Tp, class _static_t, _static_t... _Vals, _static_t __sentinal, size_t... _Idxs
244
+ >
245
+ struct __partially_static_array_impl_maker<
246
+ _Tp, _static_t, _CUDA_VSTD::integer_sequence<_static_t, _Vals...>, __sentinal, _CUDA_VSTD::integer_sequence<size_t, _Idxs...>
247
+ >
248
+ {
249
+ using __dynamic_idxs = typename __mask_sequence<
250
+ _CUDA_VSTD::integer_sequence<size_t, _Idxs...>,
251
+ _CUDA_VSTD::integer_sequence<bool, (_Vals == __sentinal)...>
252
+ >::type;
253
+ using __impl_base =
254
+ __partially_static_array_impl<_Tp, _static_t,
255
+ _CUDA_VSTD::integer_sequence<_static_t, _Vals...>,
256
+ __sentinal, _CUDA_VSTD::integer_sequence<size_t, _Idxs...>,
257
+ __dynamic_idxs,
258
+ _CUDA_VSTD::make_index_sequence<__dynamic_idxs::size()>
259
+ >;
260
+ };
261
+
262
+ template <class _Tp, class _static_t, class _ValsSeq, _static_t __sentinal = dynamic_extent>
263
+ class __partially_static_array_with_sentinal
264
+ : public __partially_static_array_impl_maker<_Tp, _static_t, _ValsSeq, __sentinal>::__impl_base
265
+ {
266
+ private:
267
+ using __base_t = typename __partially_static_array_impl_maker<_Tp, _static_t, _ValsSeq, __sentinal>::__impl_base;
268
+ public:
269
+ #if defined(_LIBCUDACXX_COMPILER_NVRTC) \
270
+ || defined(_LIBCUDACXX_CUDACC_BELOW_11_3)
271
+ constexpr __partially_static_array_with_sentinal() = default;
272
+
273
+ template<class... _Args>
274
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr
275
+ __partially_static_array_with_sentinal(_Args&&... __args) noexcept(noexcept(__base_t(_CUDA_VSTD::declval<_Args>()...)))
276
+ : __base_t(_CUDA_VSTD::forward<_Args>(__args)...)
277
+ {}
278
+ #else // ^^^ _LIBCUDACXX_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_LIBCUDACXX_COMPILER_NVRTC || nvcc >= 11.3 vvv
279
+ using __base_t::__base_t;
280
+ #endif // !_LIBCUDACXX_COMPILER_NVRTC || nvcc >= 11.3
281
+ };
282
+
283
+ //==============================================================================
284
+
285
+ template <class T, class _static_t, _static_t... __values_or_sentinals>
286
+ struct __partially_static_sizes :
287
+ __partially_static_array_with_sentinal<
288
+ T, _static_t, _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>>
289
+ {
290
+ private:
291
+ using __base_t = __partially_static_array_with_sentinal<
292
+ T, _static_t, _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>>;
293
+ public:
294
+ #if defined(_LIBCUDACXX_COMPILER_NVRTC) \
295
+ || defined(_LIBCUDACXX_CUDACC_BELOW_11_3)
296
+ constexpr __partially_static_sizes() = default;
297
+
298
+ template<class... _Args>
299
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr
300
+ __partially_static_sizes(_Args&&... __args) noexcept(noexcept(__base_t(_CUDA_VSTD::declval<_Args>()...)))
301
+ : __base_t(_CUDA_VSTD::forward<_Args>(__args)...)
302
+ {}
303
+ #else // ^^^ _LIBCUDACXX_COMPILER_NVRTC || nvcc < 11.3 ^^^ / vvv !_LIBCUDACXX_COMPILER_NVRTC || nvcc >= 11.3 vvv
304
+ using __base_t::__base_t;
305
+ #endif // !_LIBCUDACXX_COMPILER_NVRTC || nvcc >= 11.3
306
+ template <class _UTag>
307
+ __MDSPAN_FORCE_INLINE_FUNCTION constexpr __partially_static_sizes<T, _static_t, __values_or_sentinals...>
308
+ __with_tag() const noexcept {
309
+ return *this;
310
+ }
311
+ };
312
+
313
+ // Tags are needed for the standard layout version, but not here
314
+ template <class T, class _static_t, _static_t... __values_or_sentinals>
315
+ using __partially_static_sizes_tagged = __partially_static_sizes<T, _static_t, __values_or_sentinals...>;
316
+
317
+ } // end namespace __detail
318
+
319
+ #endif // _LIBCUDACXX_STD_VER > 11
320
+
321
+ _LIBCUDACXX_END_NAMESPACE_STD
322
+
323
+ #endif // !__MDSPAN_PRESERVE_STANDARD_LAYOUT
324
+
325
+ #endif // _LIBCUDACXX___MDSPAN_STATIC_ARRAY_HPP
miniCUDA124/include/cuda/std/detail/libcxx/include/__mdspan/submdspan.h ADDED
@@ -0,0 +1,609 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+
45
+ #ifndef _LIBCUDACXX___MDSPAN_SUBMDSPAN_HPP
46
+ #define _LIBCUDACXX___MDSPAN_SUBMDSPAN_HPP
47
+
48
+ #ifndef __cuda_std__
49
+ #include <__config>
50
+ #endif // __cuda_std__
51
+
52
+ #include "../__mdspan/dynamic_extent.h"
53
+ #include "../__mdspan/full_extent_t.h"
54
+ #include "../__mdspan/layout_left.h"
55
+ #include "../__mdspan/layout_right.h"
56
+ #include "../__mdspan/layout_stride.h"
57
+ #include "../__mdspan/macros.h"
58
+ #include "../__mdspan/mdspan.h"
59
+ #include "../__type_traits/conditional.h"
60
+ #include "../__type_traits/integral_constant.h"
61
+ #include "../__type_traits/is_convertible.h"
62
+ #include "../__type_traits/is_same.h"
63
+ #include "../__type_traits/is_signed.h"
64
+ #include "../__type_traits/remove_const.h"
65
+ #include "../__type_traits/remove_reference.h"
66
+ #include "../__utility/move.h"
67
+ #include "../__utility/pair.h"
68
+ #include "../tuple"
69
+
70
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
71
+ # pragma GCC system_header
72
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
73
+ # pragma clang system_header
74
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
75
+ # pragma system_header
76
+ #endif // no system header
77
+
78
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
79
+
80
+ #if _LIBCUDACXX_STD_VER > 11
81
+
82
+ namespace __detail {
83
+
84
+ template <size_t _OldExtent, size_t _OldStaticStride, class _Tp>
85
+ struct __slice_wrap {
86
+ _Tp slice;
87
+ size_t old_extent;
88
+ size_t old_stride;
89
+ };
90
+
91
+ //--------------------------------------------------------------------------------
92
+
93
+ template <size_t _OldExtent, size_t _OldStaticStride>
94
+ __MDSPAN_INLINE_FUNCTION constexpr
95
+ __slice_wrap<_OldExtent, _OldStaticStride, size_t>
96
+ __wrap_slice(size_t __val, size_t __ext, size_t __stride) { return { __val, __ext, __stride }; }
97
+
98
+ template <size_t _OldExtent, size_t _OldStaticStride, class _IntegerType, _IntegerType _Value0>
99
+ __MDSPAN_INLINE_FUNCTION constexpr
100
+ __slice_wrap<_OldExtent, _OldStaticStride, integral_constant<_IntegerType, _Value0>>
101
+ __wrap_slice(size_t __val, size_t __ext, integral_constant<_IntegerType, _Value0> __stride)
102
+ {
103
+ #if __MDSPAN_HAS_CXX_17
104
+ if constexpr (_CUDA_VSTD::is_signed_v<_IntegerType>) {
105
+ static_assert(_Value0 >= _IntegerType(0), "Invalid slice specifier");
106
+ }
107
+ #endif // __MDSPAN_HAS_CXX_17
108
+
109
+ return { __val, __ext, __stride };
110
+ }
111
+
112
+ template <size_t _OldExtent, size_t _OldStaticStride>
113
+ __MDSPAN_INLINE_FUNCTION constexpr
114
+ __slice_wrap<_OldExtent, _OldStaticStride, full_extent_t>
115
+ __wrap_slice(full_extent_t __val, size_t __ext, size_t __stride) { return { __val, __ext, __stride }; }
116
+
117
+ // TODO generalize this to anything that works with get<0> and get<1>
118
+ template <size_t _OldExtent, size_t _OldStaticStride>
119
+ __MDSPAN_INLINE_FUNCTION constexpr
120
+ __slice_wrap<_OldExtent, _OldStaticStride, _CUDA_VSTD::tuple<size_t, size_t>>
121
+ __wrap_slice(_CUDA_VSTD::tuple<size_t, size_t> const& __val, size_t __ext, size_t __stride)
122
+ {
123
+ return { __val, __ext, __stride };
124
+ }
125
+
126
+ template <size_t _OldExtent, size_t _OldStaticStride,
127
+ class _IntegerType0, _IntegerType0 _Value0,
128
+ class _IntegerType1, _IntegerType1 _Value1>
129
+ __MDSPAN_INLINE_FUNCTION constexpr
130
+ __slice_wrap<_OldExtent, _OldStaticStride,
131
+ _CUDA_VSTD::tuple<integral_constant<_IntegerType0, _Value0>,
132
+ integral_constant<_IntegerType1, _Value1>>>
133
+ __wrap_slice(_CUDA_VSTD::tuple<integral_constant<_IntegerType0, _Value0>, integral_constant<_IntegerType1, _Value1>> const& __val, size_t __ext, size_t __stride)
134
+ {
135
+ static_assert(_Value1 >= _Value0, "Invalid slice tuple");
136
+ return { __val, __ext, __stride };
137
+ }
138
+
139
+ //--------------------------------------------------------------------------------
140
+
141
+
142
+ // a layout right remains a layout right if it is indexed by 0 or more scalars,
143
+ // then optionally a pair and finally 0 or more all
144
+ template <
145
+ // what we encountered until now preserves the layout right
146
+ bool _Result=true,
147
+ // we only encountered 0 or more scalars, no pair or all
148
+ bool _EncounteredOnlyScalar=true
149
+ >
150
+ struct preserve_layout_right_analysis : integral_constant<bool, _Result> {
151
+ using layout_type_if_preserved = layout_right;
152
+ using encounter_pair = preserve_layout_right_analysis<
153
+ // if we encounter a pair, the layout remains a layout right only if it was one before
154
+ // and that only scalars were encountered until now
155
+ _Result && _EncounteredOnlyScalar,
156
+ // if we encounter a pair, we didn't encounter scalars only
157
+ false
158
+ >;
159
+ using encounter_all = preserve_layout_right_analysis<
160
+ // if we encounter a all, the layout remains a layout right if it was one before
161
+ _Result,
162
+ // if we encounter a all, we didn't encounter scalars only
163
+ false
164
+ >;
165
+ using encounter_scalar = preserve_layout_right_analysis<
166
+ // if we encounter a scalar, the layout remains a layout right only if it was one before
167
+ // and that only scalars were encountered until now
168
+ _Result && _EncounteredOnlyScalar,
169
+ // if we encounter a scalar, the fact that we encountered scalars only doesn't change
170
+ _EncounteredOnlyScalar
171
+ >;
172
+ };
173
+
174
+ // a layout left remains a layout left if it is indexed by 0 or more all,
175
+ // then optionally a pair and finally 0 or more scalars
176
+ template <
177
+ bool _Result=true,
178
+ bool _EncounteredOnlyAll=true
179
+ >
180
+ struct preserve_layout_left_analysis : integral_constant<bool, _Result> {
181
+ using layout_type_if_preserved = layout_left;
182
+ using encounter_pair = preserve_layout_left_analysis<
183
+ // if we encounter a pair, the layout remains a layout left only if it was one before
184
+ // and that only all were encountered until now
185
+ _Result && _EncounteredOnlyAll,
186
+ // if we encounter a pair, we didn't encounter all only
187
+ false
188
+ >;
189
+ using encounter_all = preserve_layout_left_analysis<
190
+ // if we encounter a all, the layout remains a layout left only if it was one before
191
+ // and that only all were encountered until now
192
+ _Result && _EncounteredOnlyAll,
193
+ // if we encounter a all, the fact that we encountered scalars all doesn't change
194
+ _EncounteredOnlyAll
195
+ >;
196
+ using encounter_scalar = preserve_layout_left_analysis<
197
+ // if we encounter a scalar, the layout remains a layout left if it was one before
198
+ _Result,
199
+ // if we encounter a scalar, we didn't encounter scalars only
200
+ false
201
+ >;
202
+ };
203
+
204
+ struct ignore_layout_preservation : integral_constant<bool, false> {
205
+ using layout_type_if_preserved = void;
206
+ using encounter_pair = ignore_layout_preservation;
207
+ using encounter_all = ignore_layout_preservation;
208
+ using encounter_scalar = ignore_layout_preservation;
209
+ };
210
+
211
+ template <class _Layout>
212
+ struct preserve_layout_analysis
213
+ : ignore_layout_preservation { };
214
+ template <>
215
+ struct preserve_layout_analysis<layout_right>
216
+ : preserve_layout_right_analysis<> { };
217
+ template <>
218
+ struct preserve_layout_analysis<layout_left>
219
+ : preserve_layout_left_analysis<> { };
220
+
221
+ //--------------------------------------------------------------------------------
222
+
223
+ template <
224
+ class _IndexT,
225
+ class _PreserveLayoutAnalysis,
226
+ class _OffsetsArray=__partially_static_sizes<_IndexT, size_t>,
227
+ class _ExtsArray=__partially_static_sizes<_IndexT, size_t>,
228
+ class _StridesArray=__partially_static_sizes<_IndexT, size_t>,
229
+ class = _CUDA_VSTD::make_index_sequence<_OffsetsArray::__size>,
230
+ class = _CUDA_VSTD::make_index_sequence<_ExtsArray::__size>,
231
+ class = _CUDA_VSTD::make_index_sequence<_StridesArray::__size>
232
+ >
233
+ struct __assign_op_slice_handler;
234
+
235
+ /* clang-format: off */
236
+ template <
237
+ class _IndexT,
238
+ class _PreserveLayoutAnalysis,
239
+ size_t... _Offsets,
240
+ size_t... _Exts,
241
+ size_t... _Strides,
242
+ size_t... _OffsetIdxs,
243
+ size_t... _ExtIdxs,
244
+ size_t... _StrideIdxs>
245
+ struct __assign_op_slice_handler<
246
+ _IndexT,
247
+ _PreserveLayoutAnalysis,
248
+ __partially_static_sizes<_IndexT, size_t, _Offsets...>,
249
+ __partially_static_sizes<_IndexT, size_t, _Exts...>,
250
+ __partially_static_sizes<_IndexT, size_t, _Strides...>,
251
+ _CUDA_VSTD::integer_sequence<size_t, _OffsetIdxs...>,
252
+ _CUDA_VSTD::integer_sequence<size_t, _ExtIdxs...>,
253
+ _CUDA_VSTD::integer_sequence<size_t, _StrideIdxs...>>
254
+ {
255
+ // TODO remove this for better compiler performance
256
+ static_assert(
257
+ __MDSPAN_FOLD_AND((_Strides == dynamic_extent || _Strides > 0) /* && ... */),
258
+ " "
259
+ );
260
+ static_assert(
261
+ __MDSPAN_FOLD_AND((_Offsets == dynamic_extent || _Offsets >= 0) /* && ... */),
262
+ " "
263
+ );
264
+
265
+ using __offsets_storage_t = __partially_static_sizes<_IndexT, size_t, _Offsets...>;
266
+ using __extents_storage_t = __partially_static_sizes<_IndexT, size_t, _Exts...>;
267
+ using __strides_storage_t = __partially_static_sizes<_IndexT, size_t, _Strides...>;
268
+ __offsets_storage_t __offsets;
269
+ __extents_storage_t __exts;
270
+ __strides_storage_t __strides;
271
+
272
+ #ifdef __INTEL_COMPILER
273
+ #if __INTEL_COMPILER <= 1800
274
+ __MDSPAN_INLINE_FUNCTION constexpr __assign_op_slice_handler(__assign_op_slice_handler&& __other) noexcept
275
+ : __offsets(_CUDA_VSTD::move(__other.__offsets)), __exts(_CUDA_VSTD::move(__other.__exts)), __strides(_CUDA_VSTD::move(__other.__strides))
276
+ { }
277
+ __MDSPAN_INLINE_FUNCTION constexpr __assign_op_slice_handler(
278
+ __offsets_storage_t&& __o,
279
+ __extents_storage_t&& __e,
280
+ __strides_storage_t&& __s
281
+ ) noexcept
282
+ : __offsets(_CUDA_VSTD::move(__o)), __exts(_CUDA_VSTD::move(__e)), __strides(_CUDA_VSTD::move(__s))
283
+ { }
284
+ #endif
285
+ #endif
286
+
287
+ // Don't define this unless we need it; they have a cost to compile
288
+ #ifndef __MDSPAN_USE_RETURN_TYPE_DEDUCTION
289
+ using __extents_type = _CUDA_VSTD::extents<_IndexT, _Exts...>;
290
+ #endif
291
+
292
+ // For size_t slice, skip the extent and stride, but add an offset corresponding to the value
293
+ template <size_t _OldStaticExtent, size_t _OldStaticStride>
294
+ __MDSPAN_FORCE_INLINE_FUNCTION // NOLINT (misc-unconventional-assign-operator)
295
+ constexpr auto
296
+ operator=(__slice_wrap<_OldStaticExtent, _OldStaticStride, size_t>&& __slice) noexcept
297
+ -> __assign_op_slice_handler<
298
+ _IndexT,
299
+ typename _PreserveLayoutAnalysis::encounter_scalar,
300
+ __partially_static_sizes<_IndexT, size_t, _Offsets..., dynamic_extent>,
301
+ __partially_static_sizes<_IndexT, size_t, _Exts...>,
302
+ __partially_static_sizes<_IndexT, size_t, _Strides...>/* intentional space here to work around ICC bug*/> {
303
+ return {
304
+ __partially_static_sizes<_IndexT, size_t, _Offsets..., dynamic_extent>(
305
+ __construct_psa_from_all_exts_values_tag,
306
+ __offsets.template __get_n<_OffsetIdxs>()..., __slice.slice),
307
+ _CUDA_VSTD::move(__exts),
308
+ _CUDA_VSTD::move(__strides)
309
+ };
310
+ }
311
+
312
+ // Treat integral_constant slice like size_t slice, but with a compile-time offset.
313
+ // The result's extents_type can't take advantage of that,
314
+ // but it might help for specialized layouts.
315
+ template <size_t _OldStaticExtent, size_t _OldStaticStride, class _IntegerType, _IntegerType _Value0>
316
+ __MDSPAN_FORCE_INLINE_FUNCTION // NOLINT (misc-unconventional-assign-operator)
317
+ constexpr auto
318
+ operator=(__slice_wrap<_OldStaticExtent, _OldStaticStride, integral_constant<_IntegerType, _Value0>>&&) noexcept
319
+ -> __assign_op_slice_handler<
320
+ _IndexT,
321
+ typename _PreserveLayoutAnalysis::encounter_scalar,
322
+ __partially_static_sizes<_IndexT, size_t, _Offsets..., _Value0>,
323
+ __partially_static_sizes<_IndexT, size_t, _Exts...>,
324
+ __partially_static_sizes<_IndexT, size_t, _Strides...>/* intentional space here to work around ICC bug*/> {
325
+ #if __MDSPAN_HAS_CXX_17
326
+ if constexpr (_CUDA_VSTD::is_signed_v<_IntegerType>) {
327
+ static_assert(_Value0 >= _IntegerType(0), "Invalid slice specifier");
328
+ }
329
+ #endif // __MDSPAN_HAS_CXX_17
330
+ return {
331
+ __partially_static_sizes<_IndexT, size_t, _Offsets..., _Value0>(
332
+ __construct_psa_from_all_exts_values_tag,
333
+ __offsets.template __get_n<_OffsetIdxs>()..., size_t(_Value0)),
334
+ _CUDA_VSTD::move(__exts),
335
+ _CUDA_VSTD::move(__strides)
336
+ };
337
+ }
338
+
339
+ // For a _CUDA_VSTD::full_extent, offset 0 and old extent
340
+ template <size_t _OldStaticExtent, size_t _OldStaticStride>
341
+ __MDSPAN_FORCE_INLINE_FUNCTION // NOLINT (misc-unconventional-assign-operator)
342
+ constexpr auto
343
+ operator=(__slice_wrap<_OldStaticExtent, _OldStaticStride, full_extent_t>&& __slice) noexcept
344
+ -> __assign_op_slice_handler<
345
+ _IndexT,
346
+ typename _PreserveLayoutAnalysis::encounter_all,
347
+ __partially_static_sizes<_IndexT, size_t, _Offsets..., 0>,
348
+ __partially_static_sizes<_IndexT, size_t, _Exts..., _OldStaticExtent>,
349
+ __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>/* intentional space here to work around ICC bug*/> {
350
+ return {
351
+ __partially_static_sizes<_IndexT, size_t, _Offsets..., 0>(
352
+ __construct_psa_from_all_exts_values_tag,
353
+ __offsets.template __get_n<_OffsetIdxs>()..., size_t(0)),
354
+ __partially_static_sizes<_IndexT, size_t, _Exts..., _OldStaticExtent>(
355
+ __construct_psa_from_all_exts_values_tag,
356
+ __exts.template __get_n<_ExtIdxs>()..., __slice.old_extent),
357
+ __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>(
358
+ __construct_psa_from_all_exts_values_tag,
359
+ __strides.template __get_n<_StrideIdxs>()..., __slice.old_stride)
360
+ };
361
+ }
362
+
363
+ // For a _CUDA_VSTD::tuple, add an offset and add a new dynamic extent (strides still preserved)
364
+ template <size_t _OldStaticExtent, size_t _OldStaticStride>
365
+ __MDSPAN_FORCE_INLINE_FUNCTION // NOLINT (misc-unconventional-assign-operator)
366
+ constexpr auto
367
+ operator=(__slice_wrap<_OldStaticExtent, _OldStaticStride, tuple<size_t, size_t>>&& __slice) noexcept
368
+ -> __assign_op_slice_handler<
369
+ _IndexT,
370
+ typename _PreserveLayoutAnalysis::encounter_pair,
371
+ __partially_static_sizes<_IndexT, size_t, _Offsets..., dynamic_extent>,
372
+ __partially_static_sizes<_IndexT, size_t, _Exts..., dynamic_extent>,
373
+ __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>/* intentional space here to work around ICC bug*/> {
374
+ return {
375
+ __partially_static_sizes<_IndexT, size_t, _Offsets..., dynamic_extent>(
376
+ __construct_psa_from_all_exts_values_tag,
377
+ __offsets.template __get_n<_OffsetIdxs>()..., _CUDA_VSTD::get<0>(__slice.slice)),
378
+ __partially_static_sizes<_IndexT, size_t, _Exts..., dynamic_extent>(
379
+ __construct_psa_from_all_exts_values_tag,
380
+ __exts.template __get_n<_ExtIdxs>()..., _CUDA_VSTD::get<1>(__slice.slice) - _CUDA_VSTD::get<0>(__slice.slice)),
381
+ __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>(
382
+ __construct_psa_from_all_exts_values_tag,
383
+ __strides.template __get_n<_StrideIdxs>()..., __slice.old_stride)
384
+ };
385
+ }
386
+
387
+ // For a _CUDA_VSTD::tuple of two integral_constant, do something like
388
+ // we did above for a tuple of two size_t, but make sure the
389
+ // result's extents type make the values compile-time constants.
390
+ template <size_t _OldStaticExtent, size_t _OldStaticStride,
391
+ class _IntegerType0, _IntegerType0 _Value0,
392
+ class _IntegerType1, _IntegerType1 _Value1>
393
+ __MDSPAN_FORCE_INLINE_FUNCTION // NOLINT (misc-unconventional-assign-operator)
394
+ constexpr auto
395
+ operator=(__slice_wrap<_OldStaticExtent, _OldStaticStride, tuple<integral_constant<_IntegerType0, _Value0>, integral_constant<_IntegerType1, _Value1>>>&& __slice) noexcept
396
+ -> __assign_op_slice_handler<
397
+ _IndexT,
398
+ typename _PreserveLayoutAnalysis::encounter_pair,
399
+ __partially_static_sizes<_IndexT, size_t, _Offsets..., size_t(_Value0)>,
400
+ __partially_static_sizes<_IndexT, size_t, _Exts..., size_t(_Value1 - _Value0)>,
401
+ __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>/* intentional space here to work around ICC bug*/> {
402
+ static_assert(_Value1 >= _Value0, "Invalid slice specifier");
403
+ return {
404
+ // We're still turning the template parameters _Value0 and _Value1
405
+ // into (constexpr) run-time values here.
406
+ __partially_static_sizes<_IndexT, size_t, _Offsets..., size_t(_Value0) > (
407
+ __construct_psa_from_all_exts_values_tag,
408
+ __offsets.template __get_n<_OffsetIdxs>()..., _Value0),
409
+ __partially_static_sizes<_IndexT, size_t, _Exts..., size_t(_Value1 - _Value0) > (
410
+ __construct_psa_from_all_exts_values_tag,
411
+ __exts.template __get_n<_ExtIdxs>()..., _Value1 - _Value0),
412
+ __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>(
413
+ __construct_psa_from_all_exts_values_tag,
414
+ __strides.template __get_n<_StrideIdxs>()..., __slice.old_stride)
415
+ };
416
+ }
417
+
418
+ // TODO defer instantiation of this?
419
+ using layout_type = conditional_t<
420
+ _PreserveLayoutAnalysis::value,
421
+ typename _PreserveLayoutAnalysis::layout_type_if_preserved,
422
+ layout_stride
423
+ >;
424
+
425
+ // TODO noexcept specification
426
+ template <class NewLayout>
427
+ __MDSPAN_INLINE_FUNCTION
428
+ __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE(
429
+ (
430
+ constexpr /* auto */
431
+ _make_layout_mapping_impl(NewLayout) noexcept
432
+ ),
433
+ (
434
+ /* not layout stride, so don't pass dynamic_strides */
435
+ /* return */ typename NewLayout::template mapping<_CUDA_VSTD::extents<_IndexT, _Exts...>>(
436
+ extents<_IndexT, _Exts...>::__make_extents_impl(_CUDA_VSTD::move(__exts))
437
+ ) /* ; */
438
+ )
439
+ )
440
+
441
+ __MDSPAN_INLINE_FUNCTION
442
+ __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE(
443
+ (
444
+ constexpr /* auto */
445
+ _make_layout_mapping_impl(layout_stride) noexcept
446
+ ),
447
+ (
448
+ /* return */ layout_stride::template mapping<_CUDA_VSTD::extents<_IndexT, _Exts...>>
449
+ ::__make_mapping(_CUDA_VSTD::move(__exts), _CUDA_VSTD::move(__strides)) /* ; */
450
+ )
451
+ )
452
+
453
+ template <class _OldLayoutMapping> // mostly for deferred instantiation, but maybe we'll use this in the future
454
+ __MDSPAN_INLINE_FUNCTION
455
+ __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE(
456
+ (
457
+ constexpr /* auto */
458
+ make_layout_mapping(_OldLayoutMapping const&) noexcept
459
+ ),
460
+ (
461
+ /* return */ this->_make_layout_mapping_impl(layout_type{}) /* ; */
462
+ )
463
+ )
464
+ };
465
+
466
+ //==============================================================================
467
+
468
+ #if __MDSPAN_USE_RETURN_TYPE_DEDUCTION
469
+ // Forking this because the C++11 version will be *completely* unreadable
470
+ template <class _ET, class _ST, size_t... _Exts, class _LP, class _AP, class... _SliceSpecs, size_t... _Idxs>
471
+ __MDSPAN_INLINE_FUNCTION
472
+ constexpr auto _submdspan_impl(
473
+ _CUDA_VSTD::integer_sequence<size_t, _Idxs...>,
474
+ mdspan<_ET, _CUDA_VSTD::extents<_ST, _Exts...>, _LP, _AP> const& __src,
475
+ _SliceSpecs&&... __slices
476
+ ) noexcept
477
+ {
478
+ using __index_t = _ST;
479
+ auto __handled =
480
+ __MDSPAN_FOLD_ASSIGN_LEFT(
481
+ (
482
+ __detail::__assign_op_slice_handler<
483
+ __index_t,
484
+ __detail::preserve_layout_analysis<_LP>
485
+ >{
486
+ __partially_static_sizes<__index_t, size_t>{},
487
+ __partially_static_sizes<__index_t, size_t>{},
488
+ __partially_static_sizes<__index_t, size_t>{}
489
+ }
490
+ ),
491
+ /* = ... = */
492
+ __detail::__wrap_slice<
493
+ _Exts, dynamic_extent
494
+ >(
495
+ __slices, __src.extents().template __extent<_Idxs>(),
496
+ __src.mapping().stride(_Idxs)
497
+ )
498
+ );
499
+
500
+ size_t __offset_size = __src.mapping()(__handled.__offsets.template __get_n<_Idxs>()...);
501
+ auto __offset_ptr = __src.accessor().offset(__src.data_handle(), __offset_size);
502
+ auto __map = __handled.make_layout_mapping(__src.mapping());
503
+ auto __acc_pol = typename _AP::offset_policy(__src.accessor());
504
+ return mdspan<
505
+ _ET, remove_const_t<_CUDA_VSTD::remove_reference_t<decltype(__map.extents())>>,
506
+ typename decltype(__handled)::layout_type, remove_const_t<_CUDA_VSTD::remove_reference_t<decltype(__acc_pol)>>
507
+ >(
508
+ _CUDA_VSTD::move(__offset_ptr), _CUDA_VSTD::move(__map), _CUDA_VSTD::move(__acc_pol)
509
+ );
510
+ }
511
+ #else
512
+
513
+ template <class _ET, class _AP, class _Src, class _Handled, size_t... _Idxs>
514
+ auto _submdspan_impl_helper(_Src&& __src, _Handled&& __h, _CUDA_VSTD::integer_sequence<size_t, _Idxs...>)
515
+ -> mdspan<
516
+ _ET, typename _Handled::__extents_type, typename _Handled::layout_type, typename _AP::offset_policy
517
+ >
518
+ {
519
+ return {
520
+ __src.accessor().offset(__src.data_handle(), __src.mapping()(__h.__offsets.template __get_n<_Idxs>()...)),
521
+ __h.make_layout_mapping(__src.mapping()),
522
+ typename _AP::offset_policy(__src.accessor())
523
+ };
524
+ }
525
+
526
+ template <class _ET, class _ST, size_t... _Exts, class _LP, class _AP, class... _SliceSpecs, size_t... _Idxs>
527
+ __MDSPAN_INLINE_FUNCTION
528
+ __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE(
529
+ (
530
+ constexpr /* auto */ _submdspan_impl(
531
+ _CUDA_VSTD::integer_sequence<size_t, _Idxs...> __seq,
532
+ mdspan<_ET, _CUDA_VSTD::extents<_ST, _Exts...>, _LP, _AP> const& __src,
533
+ _SliceSpecs&&... __slices
534
+ ) noexcept
535
+ ),
536
+ (
537
+ /* return */ _submdspan_impl_helper<_ET, _AP>(
538
+ __src,
539
+ __MDSPAN_FOLD_ASSIGN_LEFT(
540
+ (
541
+ __detail::__assign_op_slice_handler<
542
+ size_t,
543
+ __detail::preserve_layout_analysis<_LP>
544
+ >{
545
+ __partially_static_sizes<_ST, size_t>{},
546
+ __partially_static_sizes<_ST, size_t>{},
547
+ __partially_static_sizes<_ST, size_t>{}
548
+ }
549
+ ),
550
+ /* = ... = */
551
+ __detail::__wrap_slice<
552
+ _Exts, dynamic_extent
553
+ >(
554
+ __slices, __src.extents().template __extent<_Idxs>(), __src.mapping().stride(_Idxs)
555
+ )
556
+ ),
557
+ __seq
558
+ ) /* ; */
559
+ )
560
+ )
561
+
562
+ #endif
563
+
564
+ template <class _Tp> struct _is_layout_stride : false_type { };
565
+ template<>
566
+ struct _is_layout_stride<
567
+ layout_stride
568
+ > : true_type
569
+ { };
570
+
571
+ } // namespace __detail
572
+
573
+ //==============================================================================
574
+
575
+ __MDSPAN_TEMPLATE_REQUIRES(
576
+ class _ET, class _EXT, class _LP, class _AP, class... _SliceSpecs,
577
+ /* requires */ (
578
+ (
579
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_same, _LP, layout_left)
580
+ || _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_same, _LP, layout_right)
581
+ || __detail::_is_layout_stride<_LP>::value
582
+ ) &&
583
+ __MDSPAN_FOLD_AND((
584
+ _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _SliceSpecs, size_t)
585
+ || _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _SliceSpecs, tuple<size_t, size_t>)
586
+ || _LIBCUDACXX_TRAIT(_CUDA_VSTD::is_convertible, _SliceSpecs, full_extent_t)
587
+ ) /* && ... */) &&
588
+ sizeof...(_SliceSpecs) == _EXT::rank()
589
+ )
590
+ )
591
+ __MDSPAN_INLINE_FUNCTION
592
+ __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE(
593
+ (
594
+ constexpr submdspan(
595
+ mdspan<_ET, _EXT, _LP, _AP> const& __src, _SliceSpecs... __slices
596
+ ) noexcept
597
+ ),
598
+ (
599
+ /* return */
600
+ __detail::_submdspan_impl(_CUDA_VSTD::make_index_sequence<sizeof...(_SliceSpecs)>{}, __src, __slices...) /*;*/
601
+ )
602
+ )
603
+ /* clang-format: on */
604
+
605
+ #endif // _LIBCUDACXX_STD_VER > 11
606
+
607
+ _LIBCUDACXX_END_NAMESPACE_STD
608
+
609
+ #endif // _LIBCUDACXX___MDSPAN_SUBMDSPAN_HPP
miniCUDA124/include/cuda/std/detail/libcxx/include/__mdspan/type_list.h ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ //@HEADER
3
+ // ************************************************************************
4
+ //
5
+ // Kokkos v. 2.0
6
+ // Copyright (2019) Sandia Corporation
7
+ //
8
+ // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9
+ // the U.S. Government retains certain rights in this software.
10
+ //
11
+ // Redistribution and use in source and binary forms, with or without
12
+ // modification, are permitted provided that the following conditions are
13
+ // met:
14
+ //
15
+ // 1. Redistributions of source code must retain the above copyright
16
+ // notice, this list of conditions and the following disclaimer.
17
+ //
18
+ // 2. Redistributions in binary form must reproduce the above copyright
19
+ // notice, this list of conditions and the following disclaimer in the
20
+ // documentation and/or other materials provided with the distribution.
21
+ //
22
+ // 3. Neither the name of the Corporation nor the names of the
23
+ // contributors may be used to endorse or promote products derived from
24
+ // this software without specific prior written permission.
25
+ //
26
+ // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37
+ //
38
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
39
+ //
40
+ // ************************************************************************
41
+ //@HEADER
42
+ */
43
+
44
+ #ifndef _LIBCUDACXX___MDSPAN_TYPE_LIST_HPP
45
+ #define _LIBCUDACXX___MDSPAN_TYPE_LIST_HPP
46
+
47
+ #ifndef __cuda_std__
48
+ #include <__config>
49
+ #endif // __cuda_std__
50
+
51
+ #include "../__mdspan/macros.h"
52
+ #include "../__utility/integer_sequence.h"
53
+
54
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
55
+ # pragma GCC system_header
56
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
57
+ # pragma clang system_header
58
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
59
+ # pragma system_header
60
+ #endif // no system header
61
+
62
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
63
+
64
+ #if _LIBCUDACXX_STD_VER > 11
65
+
66
+ //==============================================================================
67
+
68
+ namespace __detail {
69
+
70
+ template <class... _Ts> struct __type_list { static constexpr auto __size = sizeof...(_Ts); };
71
+
72
+ // Implementation of type_list at() that's heavily optimized for small typelists
73
+ template <size_t, class> struct __type_at;
74
+ template <size_t, class _Seq, class=_CUDA_VSTD::make_index_sequence<_Seq::__size>> struct __type_at_large_impl;
75
+
76
+ template <size_t _Ip, size_t _Idx, class _Tp>
77
+ struct __type_at_entry { };
78
+
79
+ template <class _Result>
80
+ struct __type_at_assign_op_ignore_rest {
81
+ template <class _Tp>
82
+ _LIBCUDACXX_HOST_DEVICE
83
+ __type_at_assign_op_ignore_rest<_Result> operator=(_Tp&&);
84
+ using type = _Result;
85
+ };
86
+
87
+ struct __type_at_assign_op_impl {
88
+ template <size_t _Ip, size_t _Idx, class _Tp>
89
+ _LIBCUDACXX_HOST_DEVICE
90
+ __type_at_assign_op_impl operator=(__type_at_entry<_Ip, _Idx, _Tp>&&);
91
+ template <size_t _Ip, class _Tp>
92
+ _LIBCUDACXX_HOST_DEVICE
93
+ __type_at_assign_op_ignore_rest<_Tp> operator=(__type_at_entry<_Ip, _Ip, _Tp>&&);
94
+ };
95
+
96
+ template <size_t _Ip, class... _Ts, size_t... _Idxs>
97
+ struct __type_at_large_impl<_Ip, __type_list<_Ts...>, _CUDA_VSTD::integer_sequence<size_t, _Idxs...>>
98
+ : decltype(
99
+ __MDSPAN_FOLD_ASSIGN_LEFT(__type_at_assign_op_impl{}, /* = ... = */ __type_at_entry<_Ip, _Idxs, _Ts>{})
100
+ )
101
+ { };
102
+
103
+ template <size_t _Ip, class... _Ts>
104
+ struct __type_at<_Ip, __type_list<_Ts...>>
105
+ : __type_at_large_impl<_Ip, __type_list<_Ts...>>
106
+ { };
107
+
108
+ template <class _T0, class... _Ts>
109
+ struct __type_at<0, __type_list<_T0, _Ts...>> {
110
+ using type = _T0;
111
+ };
112
+
113
+ template <class _T0, class _T1, class... _Ts>
114
+ struct __type_at<1, __type_list<_T0, _T1, _Ts...>> {
115
+ using type = _T1;
116
+ };
117
+
118
+ template <class _T0, class _T1, class _T2, class... _Ts>
119
+ struct __type_at<2, __type_list<_T0, _T1, _T2, _Ts...>> {
120
+ using type = _T2;
121
+ };
122
+
123
+ template <class _T0, class _T1, class _T2, class _T3, class... _Ts>
124
+ struct __type_at<3, __type_list<_T0, _T1, _T2, _T3, _Ts...>> {
125
+ using type = _T3;
126
+ };
127
+
128
+
129
+ } // namespace __detail
130
+
131
+ //==============================================================================
132
+
133
+ #endif // _LIBCUDACXX_STD_VER > 11
134
+
135
+ _LIBCUDACXX_END_NAMESPACE_STD
136
+
137
+ #endif // _LIBCUDACXX___MDSPAN_TYPE_LIST_HPP
miniCUDA124/include/cuda/std/detail/libcxx/include/__memory/addressof.h ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___MEMORY_ADDRESSOF_H
12
+ #define _LIBCUDACXX___MEMORY_ADDRESSOF_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif //__cuda_std__
17
+
18
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
19
+ # pragma GCC system_header
20
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
21
+ # pragma clang system_header
22
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
23
+ # pragma system_header
24
+ #endif // no system header
25
+
26
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
27
+
28
+ // addressof
29
+ // NVCXX has the builtin defined but did not mark it as supported
30
+ #if defined(_LIBCUDACXX_ADDRESSOF)
31
+
32
+ template <class _Tp>
33
+ inline _LIBCUDACXX_CONSTEXPR_AFTER_CXX11
34
+ _LIBCUDACXX_NO_CFI _LIBCUDACXX_INLINE_VISIBILITY
35
+ _Tp*
36
+ addressof(_Tp& __x) noexcept
37
+ {
38
+ return __builtin_addressof(__x);
39
+ }
40
+
41
+ #else
42
+
43
+ template <class _Tp>
44
+ inline _LIBCUDACXX_NO_CFI _LIBCUDACXX_INLINE_VISIBILITY
45
+ _Tp*
46
+ addressof(_Tp& __x) noexcept
47
+ {
48
+ return reinterpret_cast<_Tp *>(
49
+ const_cast<char *>(&reinterpret_cast<const volatile char &>(__x)));
50
+ }
51
+
52
+ #endif // defined(_LIBCUDACXX_ADDRESSOF)
53
+
54
+ #if defined(_LIBCUDACXX_HAS_OBJC_ARC) && !defined(_LIBCUDACXX_PREDEFINED_OBJC_ARC_ADDRESSOF)
55
+ // Objective-C++ Automatic Reference Counting uses qualified pointers
56
+ // that require special addressof() signatures. When
57
+ // _LIBCUDACXX_PREDEFINED_OBJC_ARC_ADDRESSOF is defined, the compiler
58
+ // itself is providing these definitions. Otherwise, we provide them.
59
+ template <class _Tp>
60
+ inline _LIBCUDACXX_INLINE_VISIBILITY
61
+ __strong _Tp*
62
+ addressof(__strong _Tp& __x) noexcept
63
+ {
64
+ return &__x;
65
+ }
66
+
67
+ #ifdef _LIBCUDACXX_HAS_OBJC_ARC_WEAK
68
+ template <class _Tp>
69
+ inline _LIBCUDACXX_INLINE_VISIBILITY
70
+ __weak _Tp*
71
+ addressof(__weak _Tp& __x) noexcept
72
+ {
73
+ return &__x;
74
+ }
75
+ #endif
76
+
77
+ template <class _Tp>
78
+ inline _LIBCUDACXX_INLINE_VISIBILITY
79
+ __autoreleasing _Tp*
80
+ addressof(__autoreleasing _Tp& __x) noexcept
81
+ {
82
+ return &__x;
83
+ }
84
+
85
+ template <class _Tp>
86
+ inline _LIBCUDACXX_INLINE_VISIBILITY
87
+ __unsafe_unretained _Tp*
88
+ addressof(__unsafe_unretained _Tp& __x) noexcept
89
+ {
90
+ return &__x;
91
+ }
92
+ #endif
93
+
94
+ template <class _Tp> _Tp* addressof(const _Tp&&) noexcept = delete;
95
+
96
+ _LIBCUDACXX_END_NAMESPACE_STD
97
+
98
+ #endif // _LIBCUDACXX___MEMORY_ADDRESSOF_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__memory/construct_at.h ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___MEMORY_CONSTRUCT_AT_H
12
+ #define _LIBCUDACXX___MEMORY_CONSTRUCT_AT_H
13
+
14
+ #ifndef __cuda_std__
15
+ # include <__config>
16
+ #endif //__cuda_std__
17
+
18
+ #include "../__assert"
19
+ #include "../__concepts/__concept_macros.h"
20
+ #include "../__iterator/access.h"
21
+ #include "../__memory/addressof.h"
22
+ #include "../__memory/voidify.h"
23
+ #include "../__type_traits/enable_if.h"
24
+ #include "../__type_traits/integral_constant.h"
25
+ #include "../__type_traits/is_arithmetic.h"
26
+ #include "../__type_traits/is_array.h"
27
+ #include "../__type_traits/is_constant_evaluated.h"
28
+ #include "../__type_traits/is_trivially_move_assignable.h"
29
+ #include "../__type_traits/is_trivially_constructible.h"
30
+ #include "../__type_traits/void_t.h"
31
+ #include "../__utility/declval.h"
32
+ #include "../__utility/forward.h"
33
+ #include "../__utility/move.h"
34
+
35
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
36
+ # pragma GCC system_header
37
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
38
+ # pragma clang system_header
39
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
40
+ # pragma system_header
41
+ #endif // no system header
42
+
43
+ #ifdef _LIBCUDACXX_COMPILER_CLANG_CUDA
44
+ # include <new>
45
+ #endif // _LIBCUDACXX_COMPILER_CLANG_CUDA
46
+
47
+ #if defined(__cuda_std__) && _LIBCUDACXX_STD_VER > 17 // need to backfill ::std::construct_at
48
+ # ifndef _LIBCUDACXX_COMPILER_NVRTC
49
+ # include <memory>
50
+ # endif // _LIBCUDACXX_COMPILER_NVRTC
51
+
52
+ # ifndef __cpp_lib_constexpr_dynamic_alloc
53
+ namespace std
54
+ {
55
+ template <class _Tp,
56
+ class... _Args,
57
+ class = decltype(::new(_CUDA_VSTD::declval<void*>()) _Tp(_CUDA_VSTD::declval<_Args>()...))>
58
+ _LIBCUDACXX_INLINE_VISIBILITY constexpr _Tp* construct_at(_Tp* __location, _Args&&... __args)
59
+ {
60
+ # if defined(_LIBCUDACXX_ADDRESSOF)
61
+ return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...);
62
+ # else
63
+ return ::new (const_cast<void*>(static_cast<const volatile void*>(__location)))
64
+ _Tp(_CUDA_VSTD::forward<_Args>(__args)...);
65
+ # endif
66
+ }
67
+ } // namespace std
68
+ # endif // __cpp_lib_constexpr_dynamic_alloc
69
+ #endif // __cuda_std__ && _LIBCUDACXX_STD_VER > 17
70
+
71
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
72
+
73
+ // There is a performance issue with placement new, where EDG based compiler insert a nullptr check that is superfluous
74
+ // Because this is a noticable performance regression, we specialize it for certain types
75
+ // This is possible because we are calling ::new ignoring any user defined overloads of operator placement new
76
+ namespace __detail
77
+ {
78
+ // We cannot allow narrowing conversions between arithmetic types as the assignment will give errors
79
+ template <class _To, class...>
80
+ struct __is_narrowing_impl : false_type
81
+ {};
82
+
83
+ template <class _To, class _From>
84
+ struct __is_narrowing_impl<_To, _From> : true_type
85
+ {};
86
+
87
+ // This is a bit hacky, but we rely on the fact that arithmetic types cannot have more than one argument to their constructor
88
+ template <class _To, class _From>
89
+ struct __is_narrowing_impl<_To, _From, __void_t<decltype(_To{_CUDA_VSTD::declval<_From>()})>> : false_type
90
+ {};
91
+
92
+ template <class _Tp, class... _Args>
93
+ using __is_narrowing = _If<_LIBCUDACXX_TRAIT(is_arithmetic, _Tp), __is_narrowing_impl<_Tp, _Args...>, false_type>;
94
+
95
+ // The destination type must be trivially constructible from the arguments and also trivially assignable, because we
96
+ // technically move assign in the optimization
97
+ template <class _Tp, class... _Args>
98
+ struct __can_optimize_construct_at
99
+ : integral_constant<bool,
100
+ _LIBCUDACXX_TRAIT(is_trivially_constructible, _Tp, _Args...)
101
+ && _LIBCUDACXX_TRAIT(is_trivially_move_assignable, _Tp)
102
+ && !__is_narrowing<_Tp, _Args...>::value>
103
+ {};
104
+ } // namespace __detail
105
+
106
+ // construct_at
107
+ #if _LIBCUDACXX_STD_VER > 17
108
+
109
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
110
+ template <class _Tp,
111
+ class... _Args,
112
+ class = decltype(::new(_CUDA_VSTD::declval<void*>()) _Tp(_CUDA_VSTD::declval<_Args>()...))>
113
+ _LIBCUDACXX_INLINE_VISIBILITY
114
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 __enable_if_t<!__detail::__can_optimize_construct_at<_Tp, _Args...>::value, _Tp*>
115
+ construct_at(_Tp* __location, _Args&&... __args)
116
+ {
117
+ _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at");
118
+ # if defined(__cuda_std__)
119
+ // Need to go through `std::construct_at` as that is the explicitly blessed function
120
+ if (__libcpp_is_constant_evaluated())
121
+ {
122
+ return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...);
123
+ }
124
+ # endif // __cuda_std__
125
+ return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...);
126
+ }
127
+
128
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
129
+ template <class _Tp,
130
+ class... _Args,
131
+ class = decltype(::new(_CUDA_VSTD::declval<void*>()) _Tp(_CUDA_VSTD::declval<_Args>()...))>
132
+ _LIBCUDACXX_INLINE_VISIBILITY
133
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 __enable_if_t<__detail::__can_optimize_construct_at<_Tp, _Args...>::value, _Tp*>
134
+ construct_at(_Tp* __location, _Args&&... __args)
135
+ {
136
+ _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at");
137
+ # if defined(__cuda_std__)
138
+ // Need to go through `std::construct_at` as that is the explicitly blessed function
139
+ if (__libcpp_is_constant_evaluated())
140
+ {
141
+ return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...);
142
+ }
143
+ *__location = _Tp{_CUDA_VSTD::forward<_Args>(__args)...};
144
+ return __location;
145
+ # else // ^^^ __cuda_std__ ^^^ / vvv !__cuda_std__ vvv
146
+ // NVCC always considers construction + move assignment, other compilers are smarter using copy construction
147
+ // So rather than adding all kinds of workarounds simply fall back to the correct implementation for libcxx mode
148
+ return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...);
149
+ # endif // !__cuda_std__
150
+ }
151
+
152
+ #endif // _LIBCUDACXX_STD_VER > 17
153
+
154
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
155
+ template <class _Tp, class... _Args>
156
+ _LIBCUDACXX_INLINE_VISIBILITY
157
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 __enable_if_t<!__detail::__can_optimize_construct_at<_Tp, _Args...>::value, _Tp*>
158
+ __construct_at(_Tp* __location, _Args&&... __args)
159
+ {
160
+ _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at");
161
+ #if defined(__cuda_std__) && _LIBCUDACXX_STD_VER > 17
162
+ // Need to go through `std::construct_at` as that is the explicitly blessed function
163
+ if (__libcpp_is_constant_evaluated())
164
+ {
165
+ return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...);
166
+ }
167
+ #endif // __cuda_std__ && _LIBCUDACXX_STD_VER > 17
168
+ return ::new (_CUDA_VSTD::__voidify(*__location)) _Tp(_CUDA_VSTD::forward<_Args>(__args)...);
169
+ }
170
+
171
+ _LIBCUDACXX_DISABLE_EXEC_CHECK
172
+ template <class _Tp, class... _Args>
173
+ _LIBCUDACXX_INLINE_VISIBILITY
174
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 __enable_if_t<__detail::__can_optimize_construct_at<_Tp, _Args...>::value, _Tp*>
175
+ __construct_at(_Tp* __location, _Args&&... __args)
176
+ {
177
+ _LIBCUDACXX_ASSERT(__location != nullptr, "null pointer given to construct_at");
178
+ #if defined(__cuda_std__) && _LIBCUDACXX_STD_VER > 17
179
+ // Need to go through `std::construct_at` as that is the explicitly blessed function
180
+ if (__libcpp_is_constant_evaluated())
181
+ {
182
+ return ::std::construct_at(__location, _CUDA_VSTD::forward<_Args>(__args)...);
183
+ }
184
+ #endif // __cuda_std__ && _LIBCUDACXX_STD_VER > 17
185
+ *__location = _Tp{_CUDA_VSTD::forward<_Args>(__args)...};
186
+ return __location;
187
+ }
188
+
189
+ // destroy_at
190
+
191
+ // The internal functions are available regardless of the language version (with the exception of the `__destroy_at`
192
+ // taking an array).
193
+ template <class _ForwardIterator>
194
+ _LIBCUDACXX_INLINE_VISIBILITY
195
+ _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 _ForwardIterator __destroy(_ForwardIterator, _ForwardIterator);
196
+
197
+ template <class _Tp, __enable_if_t<!is_array<_Tp>::value, int> = 0>
198
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 void __destroy_at(_Tp* __loc)
199
+ {
200
+ _LIBCUDACXX_ASSERT(__loc != nullptr, "null pointer given to destroy_at");
201
+ __loc->~_Tp();
202
+ }
203
+
204
+ #if _LIBCUDACXX_STD_VER > 17
205
+ template <class _Tp, __enable_if_t<is_array<_Tp>::value, int> = 0>
206
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 void __destroy_at(_Tp* __loc)
207
+ {
208
+ _LIBCUDACXX_ASSERT(__loc != nullptr, "null pointer given to destroy_at");
209
+ _CUDA_VSTD::__destroy(_CUDA_VSTD::begin(*__loc), _CUDA_VSTD::end(*__loc));
210
+ }
211
+ #endif
212
+
213
+ template <class _ForwardIterator>
214
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 _ForwardIterator
215
+ __destroy(_ForwardIterator __first, _ForwardIterator __last)
216
+ {
217
+ for (; __first != __last; ++__first)
218
+ {
219
+ _CUDA_VSTD::__destroy_at(_CUDA_VSTD::addressof(*__first));
220
+ }
221
+ return __first;
222
+ }
223
+
224
+ template <class _BidirectionalIterator>
225
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 _BidirectionalIterator
226
+ __reverse_destroy(_BidirectionalIterator __first, _BidirectionalIterator __last)
227
+ {
228
+ while (__last != __first)
229
+ {
230
+ --__last;
231
+ _CUDA_VSTD::__destroy_at(_CUDA_VSTD::addressof(*__last));
232
+ }
233
+ return __last;
234
+ }
235
+
236
+ #if _LIBCUDACXX_STD_VER > 14
237
+
238
+ template <class _Tp, enable_if_t<!is_array_v<_Tp>, int> = 0>
239
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 void destroy_at(_Tp* __loc)
240
+ {
241
+ _LIBCUDACXX_ASSERT(__loc != nullptr, "null pointer given to destroy_at");
242
+ __loc->~_Tp();
243
+ }
244
+
245
+ # if _LIBCUDACXX_STD_VER > 17
246
+ template <class _Tp, enable_if_t<is_array_v<_Tp>, int> = 0>
247
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 void destroy_at(_Tp* __loc)
248
+ {
249
+ _CUDA_VSTD::__destroy_at(__loc);
250
+ }
251
+ # endif // _LIBCUDACXX_STD_VER > 17
252
+
253
+ template <class _ForwardIterator>
254
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 void
255
+ destroy(_ForwardIterator __first, _ForwardIterator __last)
256
+ {
257
+ (void) _CUDA_VSTD::__destroy(_CUDA_VSTD::move(__first), _CUDA_VSTD::move(__last));
258
+ }
259
+
260
+ template <class _ForwardIterator, class _Size>
261
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 _ForwardIterator
262
+ destroy_n(_ForwardIterator __first, _Size __n)
263
+ {
264
+ for (; __n > 0; (void) ++__first, --__n)
265
+ {
266
+ _CUDA_VSTD::__destroy_at(_CUDA_VSTD::addressof(*__first));
267
+ }
268
+ return __first;
269
+ }
270
+
271
+ #endif // _LIBCUDACXX_STD_VER > 14
272
+
273
+ _LIBCUDACXX_END_NAMESPACE_STD
274
+
275
+ #endif // _LIBCUDACXX___MEMORY_CONSTRUCT_AT_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__memory/pointer_traits.h ADDED
@@ -0,0 +1,347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___MEMORY_POINTER_TRAITS_H
11
+ #define _LIBCUDACXX___MEMORY_POINTER_TRAITS_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif //__cuda_std__
16
+
17
+ #include "../__memory/addressof.h"
18
+ #include "../__type_traits/conjunction.h"
19
+ #include "../__type_traits/conditional.h"
20
+ #include "../__type_traits/decay.h"
21
+ #include "../__type_traits/enable_if.h"
22
+ #include "../__type_traits/integral_constant.h"
23
+ #include "../__type_traits/is_class.h"
24
+ #include "../__type_traits/is_function.h"
25
+ #include "../__type_traits/is_void.h"
26
+ #include "../__type_traits/void_t.h"
27
+ #include "../__utility/declval.h"
28
+ #include "../cstddef"
29
+
30
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
31
+ # pragma GCC system_header
32
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
33
+ # pragma clang system_header
34
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
35
+ # pragma system_header
36
+ #endif // no system header
37
+
38
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
39
+
40
+ template <class _Tp, class = void>
41
+ struct __has_element_type : false_type {};
42
+
43
+ template <class _Tp>
44
+ struct __has_element_type<_Tp,
45
+ __void_t<typename _Tp::element_type>> : true_type {};
46
+
47
+ template <class _Ptr, bool = __has_element_type<_Ptr>::value>
48
+ struct __pointer_traits_element_type;
49
+
50
+ template <class _Ptr>
51
+ struct __pointer_traits_element_type<_Ptr, true>
52
+ {
53
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Ptr::element_type type;
54
+ };
55
+
56
+ #ifndef _LIBCUDACXX_HAS_NO_VARIADICS
57
+
58
+ template <template <class, class...> class _Sp, class _Tp, class ..._Args>
59
+ struct __pointer_traits_element_type<_Sp<_Tp, _Args...>, true>
60
+ {
61
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Sp<_Tp, _Args...>::element_type type;
62
+ };
63
+
64
+ template <template <class, class...> class _Sp, class _Tp, class ..._Args>
65
+ struct __pointer_traits_element_type<_Sp<_Tp, _Args...>, false>
66
+ {
67
+ typedef _LIBCUDACXX_NODEBUG_TYPE _Tp type;
68
+ };
69
+
70
+ #else // _LIBCUDACXX_HAS_NO_VARIADICS
71
+
72
+ template <template <class> class _Sp, class _Tp>
73
+ struct __pointer_traits_element_type<_Sp<_Tp>, true>
74
+ {
75
+ typedef typename _Sp<_Tp>::element_type type;
76
+ };
77
+
78
+ template <template <class> class _Sp, class _Tp>
79
+ struct __pointer_traits_element_type<_Sp<_Tp>, false>
80
+ {
81
+ typedef _Tp type;
82
+ };
83
+
84
+ template <template <class, class> class _Sp, class _Tp, class _A0>
85
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0>, true>
86
+ {
87
+ typedef typename _Sp<_Tp, _A0>::element_type type;
88
+ };
89
+
90
+ template <template <class, class> class _Sp, class _Tp, class _A0>
91
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0>, false>
92
+ {
93
+ typedef _Tp type;
94
+ };
95
+
96
+ template <template <class, class, class> class _Sp, class _Tp, class _A0, class _A1>
97
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0, _A1>, true>
98
+ {
99
+ typedef typename _Sp<_Tp, _A0, _A1>::element_type type;
100
+ };
101
+
102
+ template <template <class, class, class> class _Sp, class _Tp, class _A0, class _A1>
103
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0, _A1>, false>
104
+ {
105
+ typedef _Tp type;
106
+ };
107
+
108
+ template <template <class, class, class, class> class _Sp, class _Tp, class _A0,
109
+ class _A1, class _A2>
110
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0, _A1, _A2>, true>
111
+ {
112
+ typedef typename _Sp<_Tp, _A0, _A1, _A2>::element_type type;
113
+ };
114
+
115
+ template <template <class, class, class, class> class _Sp, class _Tp, class _A0,
116
+ class _A1, class _A2>
117
+ struct __pointer_traits_element_type<_Sp<_Tp, _A0, _A1, _A2>, false>
118
+ {
119
+ typedef _Tp type;
120
+ };
121
+
122
+ #endif // _LIBCUDACXX_HAS_NO_VARIADICS
123
+
124
+ template <class _Tp, class = void>
125
+ struct __has_difference_type : false_type {};
126
+
127
+ template <class _Tp>
128
+ struct __has_difference_type<_Tp,
129
+ __void_t<typename _Tp::difference_type>> : true_type {};
130
+
131
+ template <class _Ptr, bool = __has_difference_type<_Ptr>::value>
132
+ struct __pointer_traits_difference_type
133
+ {
134
+ typedef _LIBCUDACXX_NODEBUG_TYPE ptrdiff_t type;
135
+ };
136
+
137
+ template <class _Ptr>
138
+ struct __pointer_traits_difference_type<_Ptr, true>
139
+ {
140
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Ptr::difference_type type;
141
+ };
142
+
143
+ template <class _Tp, class _Up>
144
+ struct __has_rebind
145
+ {
146
+ private:
147
+ template <class _Xp> _LIBCUDACXX_INLINE_VISIBILITY static false_type __test(...);
148
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_PUSH
149
+ template <class _Xp> _LIBCUDACXX_INLINE_VISIBILITY static true_type __test(typename _Xp::template rebind<_Up>* = 0);
150
+ _LIBCUDACXX_SUPPRESS_DEPRECATED_POP
151
+ public:
152
+ static const bool value = decltype(__test<_Tp>(0))::value;
153
+ };
154
+
155
+ template <class _Tp, class _Up, bool = __has_rebind<_Tp, _Up>::value>
156
+ struct __pointer_traits_rebind
157
+ {
158
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Tp::template rebind<_Up> type;
159
+ };
160
+
161
+ #ifndef _LIBCUDACXX_HAS_NO_VARIADICS
162
+
163
+ template <template <class, class...> class _Sp, class _Tp, class ..._Args, class _Up>
164
+ struct __pointer_traits_rebind<_Sp<_Tp, _Args...>, _Up, true>
165
+ {
166
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename _Sp<_Tp, _Args...>::template rebind<_Up> type;
167
+ };
168
+
169
+ template <template <class, class...> class _Sp, class _Tp, class ..._Args, class _Up>
170
+ struct __pointer_traits_rebind<_Sp<_Tp, _Args...>, _Up, false>
171
+ {
172
+ typedef _Sp<_Up, _Args...> type;
173
+ };
174
+
175
+ #else // _LIBCUDACXX_HAS_NO_VARIADICS
176
+
177
+ template <template <class> class _Sp, class _Tp, class _Up>
178
+ struct __pointer_traits_rebind<_Sp<_Tp>, _Up, true>
179
+ {
180
+ typedef typename _Sp<_Tp>::template rebind<_Up> type;
181
+ };
182
+
183
+ template <template <class> class _Sp, class _Tp, class _Up>
184
+ struct __pointer_traits_rebind<_Sp<_Tp>, _Up, false>
185
+ {
186
+ typedef _Sp<_Up> type;
187
+ };
188
+
189
+ template <template <class, class> class _Sp, class _Tp, class _A0, class _Up>
190
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0>, _Up, true>
191
+ {
192
+ typedef typename _Sp<_Tp, _A0>::template rebind<_Up> type;
193
+ };
194
+
195
+ template <template <class, class> class _Sp, class _Tp, class _A0, class _Up>
196
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0>, _Up, false>
197
+ {
198
+ typedef _Sp<_Up, _A0> type;
199
+ };
200
+
201
+ template <template <class, class, class> class _Sp, class _Tp, class _A0,
202
+ class _A1, class _Up>
203
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0, _A1>, _Up, true>
204
+ {
205
+ typedef typename _Sp<_Tp, _A0, _A1>::template rebind<_Up> type;
206
+ };
207
+
208
+ template <template <class, class, class> class _Sp, class _Tp, class _A0,
209
+ class _A1, class _Up>
210
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0, _A1>, _Up, false>
211
+ {
212
+ typedef _Sp<_Up, _A0, _A1> type;
213
+ };
214
+
215
+ template <template <class, class, class, class> class _Sp, class _Tp, class _A0,
216
+ class _A1, class _A2, class _Up>
217
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0, _A1, _A2>, _Up, true>
218
+ {
219
+ typedef typename _Sp<_Tp, _A0, _A1, _A2>::template rebind<_Up> type;
220
+ };
221
+
222
+ template <template <class, class, class, class> class _Sp, class _Tp, class _A0,
223
+ class _A1, class _A2, class _Up>
224
+ struct __pointer_traits_rebind<_Sp<_Tp, _A0, _A1, _A2>, _Up, false>
225
+ {
226
+ typedef _Sp<_Up, _A0, _A1, _A2> type;
227
+ };
228
+
229
+ #endif // _LIBCUDACXX_HAS_NO_VARIADICS
230
+
231
+ template <class _Ptr>
232
+ struct _LIBCUDACXX_TEMPLATE_VIS pointer_traits
233
+ {
234
+ typedef _Ptr pointer;
235
+ typedef typename __pointer_traits_element_type<pointer>::type element_type;
236
+ typedef typename __pointer_traits_difference_type<pointer>::type difference_type;
237
+
238
+ template <class _Up> using rebind = typename __pointer_traits_rebind<pointer, _Up>::type;
239
+
240
+ private:
241
+ struct __nat {};
242
+ public:
243
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
244
+ static pointer pointer_to(__conditional_t<is_void<element_type>::value, __nat, element_type>& __r)
245
+ {return pointer::pointer_to(__r);}
246
+ };
247
+
248
+ template <class _Tp>
249
+ struct _LIBCUDACXX_TEMPLATE_VIS pointer_traits<_Tp*>
250
+ {
251
+ typedef _Tp* pointer;
252
+ typedef _Tp element_type;
253
+ typedef ptrdiff_t difference_type;
254
+
255
+ template <class _Up> using rebind = _Up*;
256
+
257
+ private:
258
+ struct __nat {};
259
+ public:
260
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17
261
+ static pointer pointer_to(__conditional_t<is_void<element_type>::value, __nat, element_type>& __r) noexcept
262
+ {return _CUDA_VSTD::addressof(__r);}
263
+ };
264
+
265
+ template <class _From, class _To>
266
+ struct __rebind_pointer {
267
+ typedef typename pointer_traits<_From>::template rebind<_To> type;
268
+ };
269
+
270
+ // to_address
271
+
272
+ template <class _Pointer, class = void>
273
+ struct __to_address_helper;
274
+
275
+ template <class _Tp>
276
+ _LIBCUDACXX_INLINE_VISIBILITY constexpr
277
+ _Tp* __to_address(_Tp* __p) noexcept {
278
+ static_assert(!is_function<_Tp>::value, "_Tp is a function type");
279
+ return __p;
280
+ }
281
+
282
+ template <class _Pointer, class = void>
283
+ struct _HasToAddress : false_type {};
284
+
285
+ template <class _Pointer>
286
+ struct _HasToAddress<_Pointer,
287
+ decltype((void)pointer_traits<_Pointer>::to_address(declval<const _Pointer&>()))
288
+ > : true_type {};
289
+
290
+ template <class _Pointer, class = void>
291
+ struct _HasArrow : false_type {};
292
+
293
+ template <class _Pointer>
294
+ struct _HasArrow<_Pointer,
295
+ decltype((void)declval<const _Pointer&>().operator->())
296
+ > : true_type {};
297
+
298
+ template <class _Pointer>
299
+ struct _IsFancyPointer {
300
+ static const bool value = _HasArrow<_Pointer>::value || _HasToAddress<_Pointer>::value;
301
+ };
302
+
303
+ // enable_if is needed here to avoid instantiating checks for fancy pointers on raw pointers
304
+ template <class _Pointer, class = __enable_if_t<
305
+ _And<is_class<_Pointer>, _IsFancyPointer<_Pointer> >::value
306
+ > >
307
+ _LIBCUDACXX_INLINE_VISIBILITY constexpr
308
+ __decay_t<decltype(__to_address_helper<_Pointer>::__call(declval<const _Pointer&>()))>
309
+ __to_address(const _Pointer& __p) noexcept {
310
+ return __to_address_helper<_Pointer>::__call(__p);
311
+ }
312
+
313
+ template <class _Pointer, class>
314
+ struct __to_address_helper {
315
+ _LIBCUDACXX_INLINE_VISIBILITY constexpr
316
+ static decltype(_CUDA_VSTD::__to_address(declval<const _Pointer&>().operator->()))
317
+ __call(const _Pointer& __p) noexcept {
318
+ return _CUDA_VSTD::__to_address(__p.operator->());
319
+ }
320
+ };
321
+
322
+ template <class _Pointer>
323
+ struct __to_address_helper<_Pointer, decltype((void)pointer_traits<_Pointer>::to_address(declval<const _Pointer&>()))> {
324
+ _LIBCUDACXX_INLINE_VISIBILITY constexpr
325
+ static decltype(pointer_traits<_Pointer>::to_address(declval<const _Pointer&>()))
326
+ __call(const _Pointer& __p) noexcept {
327
+ return pointer_traits<_Pointer>::to_address(__p);
328
+ }
329
+ };
330
+
331
+ #if _LIBCUDACXX_STD_VER > 11
332
+ template <class _Tp>
333
+ inline _LIBCUDACXX_INLINE_VISIBILITY constexpr
334
+ auto to_address(_Tp *__p) noexcept {
335
+ return _CUDA_VSTD::__to_address(__p);
336
+ }
337
+
338
+ template <class _Pointer>
339
+ inline _LIBCUDACXX_INLINE_VISIBILITY constexpr
340
+ auto to_address(const _Pointer& __p) noexcept -> decltype(_CUDA_VSTD::__to_address(__p)) {
341
+ return _CUDA_VSTD::__to_address(__p);
342
+ }
343
+ #endif
344
+
345
+ _LIBCUDACXX_END_NAMESPACE_STD
346
+
347
+ #endif // _LIBCUDACXX___MEMORY_POINTER_TRAITS_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__memory/voidify.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // -*- C++ -*-
2
+ //===----------------------------------------------------------------------===//
3
+ //
4
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5
+ // See https://llvm.org/LICENSE.txt for license information.
6
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___MEMORY_VOIDIFY_H
12
+ #define _LIBCUDACXX___MEMORY_VOIDIFY_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif //__cuda_std__
17
+
18
+ #include "../__memory/addressof.h"
19
+
20
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
21
+ # pragma GCC system_header
22
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
23
+ # pragma clang system_header
24
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
25
+ # pragma system_header
26
+ #endif // no system header
27
+
28
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
29
+
30
+ template <typename _Tp>
31
+ _LIBCUDACXX_INLINE_VISIBILITY _LIBCUDACXX_CONSTEXPR_AFTER_CXX17 void* __voidify(_Tp& __from) {
32
+ // Cast away cv-qualifiers to allow modifying elements of a range through const iterators.
33
+ return const_cast<void*>(static_cast<const volatile void*>(_CUDA_VSTD::addressof(__from)));
34
+ }
35
+
36
+ _LIBCUDACXX_END_NAMESPACE_STD
37
+
38
+ #endif // _LIBCUDACXX___MEMORY_VOIDIFY_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/apply_cv.h ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TUPLE_APPLY_CV_H
11
+ #define _LIBCUDACXX___TUPLE_APPLY_CV_H
12
+
13
+ #ifndef __cuda_std__
14
+ # include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/is_const.h"
18
+ #include "../__type_traits/is_reference.h"
19
+ #include "../__type_traits/is_volatile.h"
20
+ #include "../__type_traits/remove_reference.h"
21
+
22
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
23
+ # pragma GCC system_header
24
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
25
+ # pragma clang system_header
26
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
27
+ # pragma system_header
28
+ #endif // no system header
29
+
30
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
31
+
32
+ template <bool _ApplyLV, bool _ApplyConst, bool _ApplyVolatile>
33
+ struct __apply_cv_mf;
34
+ template <>
35
+ struct __apply_cv_mf<false, false, false>
36
+ {
37
+ template <class _Tp>
38
+ using __apply = _Tp;
39
+ };
40
+ template <>
41
+ struct __apply_cv_mf<false, true, false>
42
+ {
43
+ template <class _Tp>
44
+ using __apply _LIBCUDACXX_NODEBUG_TYPE = const _Tp;
45
+ };
46
+ template <>
47
+ struct __apply_cv_mf<false, false, true>
48
+ {
49
+ template <class _Tp>
50
+ using __apply _LIBCUDACXX_NODEBUG_TYPE = volatile _Tp;
51
+ };
52
+ template <>
53
+ struct __apply_cv_mf<false, true, true>
54
+ {
55
+ template <class _Tp>
56
+ using __apply _LIBCUDACXX_NODEBUG_TYPE = const volatile _Tp;
57
+ };
58
+ template <>
59
+ struct __apply_cv_mf<true, false, false>
60
+ {
61
+ template <class _Tp>
62
+ using __apply _LIBCUDACXX_NODEBUG_TYPE = _Tp&;
63
+ };
64
+ template <>
65
+ struct __apply_cv_mf<true, true, false>
66
+ {
67
+ template <class _Tp>
68
+ using __apply _LIBCUDACXX_NODEBUG_TYPE = const _Tp&;
69
+ };
70
+ template <>
71
+ struct __apply_cv_mf<true, false, true>
72
+ {
73
+ template <class _Tp>
74
+ using __apply _LIBCUDACXX_NODEBUG_TYPE = volatile _Tp&;
75
+ };
76
+ template <>
77
+ struct __apply_cv_mf<true, true, true>
78
+ {
79
+ template <class _Tp>
80
+ using __apply _LIBCUDACXX_NODEBUG_TYPE = const volatile _Tp&;
81
+ };
82
+ template <class _Tp, class _RawTp = __libcpp_remove_reference_t<_Tp> >
83
+ using __apply_cv_t _LIBCUDACXX_NODEBUG_TYPE =
84
+ __apply_cv_mf<_LIBCUDACXX_TRAIT(is_lvalue_reference, _Tp),
85
+ _LIBCUDACXX_TRAIT(is_const, _RawTp),
86
+ _LIBCUDACXX_TRAIT(is_volatile, _RawTp)>;
87
+
88
+ _LIBCUDACXX_END_NAMESPACE_STD
89
+
90
+ #endif // _LIBCUDACXX___TUPLE_APPLY_CV_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/make_tuple_types.h ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TUPLE_MAKE_TUPLE_TYPES_H
11
+ #define _LIBCUDACXX___TUPLE_MAKE_TUPLE_TYPES_H
12
+
13
+ #ifndef __cuda_std__
14
+ # include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__fwd/array.h"
18
+ #include "../__fwd/tuple.h"
19
+ #include "../__tuple_dir/apply_cv.h"
20
+ #include "../__tuple_dir/tuple_element.h"
21
+ #include "../__tuple_dir/tuple_indices.h"
22
+ #include "../__tuple_dir/tuple_size.h"
23
+ #include "../__tuple_dir/tuple_types.h"
24
+ #include "../__type_traits/remove_cv.h"
25
+ #include "../__type_traits/remove_reference.h"
26
+ #include "../cstddef"
27
+
28
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
29
+ # pragma GCC system_header
30
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
31
+ # pragma clang system_header
32
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
33
+ # pragma system_header
34
+ #endif // no system header
35
+
36
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
37
+
38
+ // __make_tuple_types<_Tuple<_Types...>, _Ep, _Sp>::type is a
39
+ // __tuple_types<_Types...> using only those _Types in the range [_Sp, _Ep).
40
+ // _Sp defaults to 0 and _Ep defaults to tuple_size<_Tuple>. If _Tuple is a
41
+ // lvalue_reference type, then __tuple_types<_Types&...> is the result.
42
+
43
+ template <class _TupleTypes, class _TupleIndices>
44
+ struct __make_tuple_types_flat;
45
+
46
+ template <template <class...> class _Tuple, class... _Types, size_t... _Idx>
47
+ struct __make_tuple_types_flat<_Tuple<_Types...>, __tuple_indices<_Idx...>>
48
+ {
49
+ // Specialization for pair, tuple, and __tuple_types
50
+ template <class _Tp, class _ApplyFn = __apply_cv_t<_Tp>>
51
+ using __apply_quals _LIBCUDACXX_NODEBUG_TYPE =
52
+ __tuple_types< typename _ApplyFn::template __apply<__type_pack_element<_Idx, _Types...>>... >;
53
+ };
54
+
55
+ template <class _Vt, size_t _Np, size_t... _Idx>
56
+ struct __make_tuple_types_flat<array<_Vt, _Np>, __tuple_indices<_Idx...>>
57
+ {
58
+ template <size_t>
59
+ using __value_type = _Vt;
60
+ template <class _Tp, class _ApplyFn = __apply_cv_t<_Tp>>
61
+ using __apply_quals = __tuple_types< typename _ApplyFn::template __apply<__value_type<_Idx>>... >;
62
+ };
63
+
64
+ template <class _Tp, size_t _Ep = tuple_size<__libcpp_remove_reference_t<_Tp>>::value, size_t _Sp = 0,
65
+ bool _SameSize = (_Ep == tuple_size<__libcpp_remove_reference_t<_Tp>>::value)>
66
+ struct __make_tuple_types
67
+ {
68
+ static_assert(_Sp <= _Ep, "__make_tuple_types input error");
69
+ using _RawTp = __remove_cv_t<__libcpp_remove_reference_t<_Tp>>;
70
+ using _Maker = __make_tuple_types_flat<_RawTp, __make_tuple_indices_t<_Ep, _Sp>>;
71
+ using type = typename _Maker::template __apply_quals<_Tp>;
72
+ };
73
+
74
+ template <class... _Types, size_t _Ep>
75
+ struct __make_tuple_types<tuple<_Types...>, _Ep, 0, true>
76
+ {
77
+ typedef _LIBCUDACXX_NODEBUG_TYPE __tuple_types<_Types...> type;
78
+ };
79
+
80
+ template <class... _Types, size_t _Ep>
81
+ struct __make_tuple_types<__tuple_types<_Types...>, _Ep, 0, true>
82
+ {
83
+ typedef _LIBCUDACXX_NODEBUG_TYPE __tuple_types<_Types...> type;
84
+ };
85
+
86
+ template <class _Tp, size_t _Ep = tuple_size<__libcpp_remove_reference_t<_Tp>>::value, size_t _Sp = 0>
87
+ using __make_tuple_types_t = typename __make_tuple_types<_Tp, _Ep, _Sp>::type;
88
+
89
+ _LIBCUDACXX_END_NAMESPACE_STD
90
+
91
+ #endif // _LIBCUDACXX___TUPLE_MAKE_TUPLE_TYPES_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/sfinae_helpers.h ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TUPLE_SFINAE_HELPERS_H
11
+ #define _LIBCUDACXX___TUPLE_SFINAE_HELPERS_H
12
+
13
+ #ifndef __cuda_std__
14
+ # include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__fwd/tuple.h"
18
+ #include "../__tuple_dir/make_tuple_types.h"
19
+ #include "../__tuple_dir/tuple_element.h"
20
+ #include "../__tuple_dir/tuple_like.h"
21
+ #include "../__tuple_dir/tuple_size.h"
22
+ #include "../__tuple_dir/tuple_types.h"
23
+ #include "../__type_traits/enable_if.h"
24
+ #include "../__type_traits/integral_constant.h"
25
+ #include "../__type_traits/is_assignable.h"
26
+ #include "../__type_traits/is_constructible.h"
27
+ #include "../__type_traits/is_convertible.h"
28
+ #include "../__type_traits/is_same.h"
29
+ #include "../__type_traits/remove_cvref.h"
30
+ #include "../__type_traits/remove_reference.h"
31
+ #include "../cstddef"
32
+
33
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
34
+ # pragma GCC system_header
35
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
36
+ # pragma clang system_header
37
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
38
+ # pragma system_header
39
+ #endif // no system header
40
+
41
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
42
+
43
+ template <bool... _Preds>
44
+ struct __all_dummy;
45
+
46
+ template <bool... _Pred>
47
+ using __all = _IsSame<__all_dummy<_Pred...>, __all_dummy<((void) _Pred, true)...>>;
48
+
49
+ struct __tuple_sfinae_base
50
+ {
51
+ template <class, class>
52
+ struct __test_size : false_type
53
+ {};
54
+
55
+ template <class... _Tp, class... _Up>
56
+ struct __test_size<__tuple_types<_Tp...>, __tuple_types<_Up...>> : _BoolConstant<sizeof...(_Tp) == sizeof...(_Up)>
57
+ {};
58
+
59
+ template <template <class, class...> class, class _Tp, class _Up, bool = __test_size<_Tp, _Up>::value>
60
+ struct __test : false_type
61
+ {};
62
+
63
+ template <template <class, class...> class _Trait, class... _LArgs, class... _RArgs>
64
+ struct __test<_Trait, __tuple_types<_LArgs...>, __tuple_types<_RArgs...>, true>
65
+ : __all<_Trait<_LArgs, _RArgs>::value...>
66
+ {};
67
+
68
+ template <class _FromArgs, class _ToArgs>
69
+ using __constructible = __test<is_constructible, _ToArgs, _FromArgs>;
70
+ template <class _FromArgs, class _ToArgs>
71
+ using __convertible = __test<is_convertible, _FromArgs, _ToArgs>;
72
+ template <class _FromArgs, class _ToArgs>
73
+ using __assignable = __test<is_assignable, _ToArgs, _FromArgs>;
74
+ };
75
+
76
+ // __tuple_convertible
77
+
78
+ template <class _Tp, class _Up, bool = __tuple_like<__libcpp_remove_reference_t<_Tp>>::value,
79
+ bool = __tuple_like<_Up>::value>
80
+ struct __tuple_convertible : public false_type
81
+ {};
82
+
83
+ template <class _Tp, class _Up>
84
+ struct __tuple_convertible<_Tp, _Up, true, true>
85
+ : public __tuple_sfinae_base::__convertible<__make_tuple_types_t<_Tp>, __make_tuple_types_t<_Up>>
86
+ {};
87
+
88
+ // __tuple_constructible
89
+
90
+ template <class _Tp, class _Up, bool = __tuple_like<__libcpp_remove_reference_t<_Tp>>::value,
91
+ bool = __tuple_like<_Up>::value>
92
+ struct __tuple_constructible : public false_type
93
+ {};
94
+
95
+ template <class _Tp, class _Up>
96
+ struct __tuple_constructible<_Tp, _Up, true, true>
97
+ : public __tuple_sfinae_base::__constructible<__make_tuple_types_t<_Tp>, __make_tuple_types_t<_Up>>
98
+ {};
99
+
100
+ // __tuple_assignable
101
+
102
+ template <class _Tp, class _Up, bool = __tuple_like<__libcpp_remove_reference_t<_Tp>>::value,
103
+ bool = __tuple_like<_Up>::value>
104
+ struct __tuple_assignable : public false_type
105
+ {};
106
+
107
+ template <class _Tp, class _Up>
108
+ struct __tuple_assignable<_Tp, _Up, true, true>
109
+ : public __tuple_sfinae_base::__assignable<__make_tuple_types_t<_Tp>, __make_tuple_types_t<_Up&>>
110
+ {};
111
+
112
+ template <size_t _Ip, class... _Tp>
113
+ struct _LIBCUDACXX_TEMPLATE_VIS tuple_element<_Ip, tuple<_Tp...> >
114
+ {
115
+ typedef _LIBCUDACXX_NODEBUG_TYPE __tuple_element_t<_Ip, __tuple_types<_Tp...>> type;
116
+ };
117
+
118
+ template <bool _IsTuple, class _SizeTrait, size_t _Expected>
119
+ struct __tuple_like_with_size_imp : false_type
120
+ {};
121
+
122
+ template <class _SizeTrait, size_t _Expected>
123
+ struct __tuple_like_with_size_imp<true, _SizeTrait, _Expected> : integral_constant<bool, _SizeTrait::value == _Expected>
124
+ {};
125
+
126
+ template <class _Tuple, size_t _ExpectedSize, class _RawTuple = __remove_cvref_t<_Tuple>>
127
+ using __tuple_like_with_size _LIBCUDACXX_NODEBUG_TYPE =
128
+ __tuple_like_with_size_imp< __tuple_like<_RawTuple>::value, tuple_size<_RawTuple>, _ExpectedSize >;
129
+
130
+ struct _LIBCUDACXX_TYPE_VIS __check_tuple_constructor_fail
131
+ {
132
+ template <int&...>
133
+ using __enable_explicit_default = false_type;
134
+ template <int&...>
135
+ using __enable_implicit_default = false_type;
136
+ template <class...>
137
+ using __enable_explicit = false_type;
138
+ template <class...>
139
+ using __enable_implicit = false_type;
140
+ template <class...>
141
+ using __enable_assign = false_type;
142
+ };
143
+
144
+ #if _LIBCUDACXX_STD_VER > 11
145
+
146
+ template <bool _CanCopy, bool _CanMove>
147
+ struct __sfinae_ctor_base
148
+ {};
149
+ template <>
150
+ struct __sfinae_ctor_base<false, false>
151
+ {
152
+ __sfinae_ctor_base() = default;
153
+ __sfinae_ctor_base(__sfinae_ctor_base const&) = delete;
154
+ __sfinae_ctor_base(__sfinae_ctor_base&&) = delete;
155
+ __sfinae_ctor_base& operator=(__sfinae_ctor_base const&) = default;
156
+ __sfinae_ctor_base& operator=(__sfinae_ctor_base&&) = default;
157
+ };
158
+ template <>
159
+ struct __sfinae_ctor_base<true, false>
160
+ {
161
+ __sfinae_ctor_base() = default;
162
+ __sfinae_ctor_base(__sfinae_ctor_base const&) = default;
163
+ __sfinae_ctor_base(__sfinae_ctor_base&&) = delete;
164
+ __sfinae_ctor_base& operator=(__sfinae_ctor_base const&) = default;
165
+ __sfinae_ctor_base& operator=(__sfinae_ctor_base&&) = default;
166
+ };
167
+ template <>
168
+ struct __sfinae_ctor_base<false, true>
169
+ {
170
+ __sfinae_ctor_base() = default;
171
+ __sfinae_ctor_base(__sfinae_ctor_base const&) = delete;
172
+ __sfinae_ctor_base(__sfinae_ctor_base&&) = default;
173
+ __sfinae_ctor_base& operator=(__sfinae_ctor_base const&) = default;
174
+ __sfinae_ctor_base& operator=(__sfinae_ctor_base&&) = default;
175
+ };
176
+
177
+ template <bool _CanCopy, bool _CanMove>
178
+ struct __sfinae_assign_base
179
+ {};
180
+ template <>
181
+ struct __sfinae_assign_base<false, false>
182
+ {
183
+ __sfinae_assign_base() = default;
184
+ __sfinae_assign_base(__sfinae_assign_base const&) = default;
185
+ __sfinae_assign_base(__sfinae_assign_base&&) = default;
186
+ __sfinae_assign_base& operator=(__sfinae_assign_base const&) = delete;
187
+ __sfinae_assign_base& operator=(__sfinae_assign_base&&) = delete;
188
+ };
189
+ template <>
190
+ struct __sfinae_assign_base<true, false>
191
+ {
192
+ __sfinae_assign_base() = default;
193
+ __sfinae_assign_base(__sfinae_assign_base const&) = default;
194
+ __sfinae_assign_base(__sfinae_assign_base&&) = default;
195
+ __sfinae_assign_base& operator=(__sfinae_assign_base const&) = default;
196
+ __sfinae_assign_base& operator=(__sfinae_assign_base&&) = delete;
197
+ };
198
+ template <>
199
+ struct __sfinae_assign_base<false, true>
200
+ {
201
+ __sfinae_assign_base() = default;
202
+ __sfinae_assign_base(__sfinae_assign_base const&) = default;
203
+ __sfinae_assign_base(__sfinae_assign_base&&) = default;
204
+ __sfinae_assign_base& operator=(__sfinae_assign_base const&) = delete;
205
+ __sfinae_assign_base& operator=(__sfinae_assign_base&&) = default;
206
+ };
207
+ #endif // _LIBCUDACXX_STD_VER > 11
208
+
209
+ _LIBCUDACXX_END_NAMESPACE_STD
210
+
211
+ #endif // _LIBCUDACXX___TUPLE_SFINAE_HELPERS_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/structured_bindings.h ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// Provides ::std::tuple_size / ::std::tuple_element specializations for
// cuda::std::array, cuda::std::pair and cuda::std::tuple, forwarding to the
// corresponding _CUDA_VSTD traits.  Structured bindings only consult the
// templates in namespace std, so without these forwarders
// `auto [a, b] = cuda::std::pair{...}` would not compile.

#ifndef _LIBCUDACXX___TUPLE_STRUCTURED_BINDINGS_H
#define _LIBCUDACXX___TUPLE_STRUCTURED_BINDINGS_H

#ifdef __cuda_std__

// Clang warns when tuple_size/tuple_element are re-declared here with a
// different class-key than the host standard library used; silence that.
#  if defined(_LIBCUDACXX_COMPILER_CLANG)
#    pragma clang diagnostic push
#    pragma clang diagnostic ignored "-Wmismatched-tags"
#  endif // _LIBCUDACXX_COMPILER_CLANG

#  if !defined(__CUDACC_RTC__)
// Fetch utility to get primary template for ::std::tuple_size necessary for the specialization of
// ::std::tuple_size<cuda::std::tuple> to enable structured bindings.
// See https://github.com/NVIDIA/libcudacxx/issues/316
#    include <utility>
#  endif

#  include "../__fwd/array.h"
#  include "../__fwd/pair.h"
#  include "../__fwd/tuple.h"
#  include "../__tuple_dir/tuple_element.h"
#  include "../__tuple_dir/tuple_size.h"
#  include "../__type_traits/integral_constant.h"

// This is a workaround for the fact that structured bindings require that the specializations of
// `tuple_size` and `tuple_element` reside in namespace std (https://eel.is/c++draft/dcl.struct.bind#4).
// See https://github.com/NVIDIA/libcudacxx/issues/316 for a short discussion
#  if _LIBCUDACXX_STD_VER > 14
namespace std {
// NVRTC has no host <utility>, so the std:: primary templates must be
// declared here before they can be specialized.
#    if defined(__CUDACC_RTC__)
template <class... _Tp>
struct tuple_size;

template <size_t _Ip, class... _Tp>
struct tuple_element;
#    endif

// --- cuda::std::array: all four cv-qualified forms -------------------------
template <class _Tp, size_t _Size>
struct tuple_size<_CUDA_VSTD::array<_Tp, _Size>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::array<_Tp, _Size>>
{};

template <class _Tp, size_t _Size>
struct tuple_size<const _CUDA_VSTD::array<_Tp, _Size>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::array<_Tp, _Size>>
{};

template <class _Tp, size_t _Size>
struct tuple_size<volatile _CUDA_VSTD::array<_Tp, _Size>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::array<_Tp, _Size>>
{};

template <class _Tp, size_t _Size>
struct tuple_size<const volatile _CUDA_VSTD::array<_Tp, _Size>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::array<_Tp, _Size>>
{};

// Note: tuple_element forwards to the cv-qualified _CUDA_VSTD trait so the
// element type picks up the enclosing cv-qualification.
template <size_t _Ip, class _Tp, size_t _Size>
struct tuple_element<_Ip, _CUDA_VSTD::array<_Tp, _Size>> : _CUDA_VSTD::tuple_element<_Ip, _CUDA_VSTD::array<_Tp, _Size>>
{};

template <size_t _Ip, class _Tp, size_t _Size>
struct tuple_element<_Ip, const _CUDA_VSTD::array<_Tp, _Size>>
    : _CUDA_VSTD::tuple_element<_Ip, const _CUDA_VSTD::array<_Tp, _Size>>
{};

template <size_t _Ip, class _Tp, size_t _Size>
struct tuple_element<_Ip, volatile _CUDA_VSTD::array<_Tp, _Size>>
    : _CUDA_VSTD::tuple_element<_Ip, volatile _CUDA_VSTD::array<_Tp, _Size>>
{};

template <size_t _Ip, class _Tp, size_t _Size>
struct tuple_element<_Ip, const volatile _CUDA_VSTD::array<_Tp, _Size>>
    : _CUDA_VSTD::tuple_element<_Ip, const volatile _CUDA_VSTD::array<_Tp, _Size>>
{};

// --- cuda::std::pair: all four cv-qualified forms --------------------------
template <class _Tp, class _Up>
struct tuple_size<_CUDA_VSTD::pair<_Tp, _Up>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::pair<_Tp, _Up>>
{};

template <class _Tp, class _Up>
struct tuple_size<const _CUDA_VSTD::pair<_Tp, _Up>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::pair<_Tp, _Up>>
{};

template <class _Tp, class _Up>
struct tuple_size<volatile _CUDA_VSTD::pair<_Tp, _Up>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::pair<_Tp, _Up>>
{};

template <class _Tp, class _Up>
struct tuple_size<const volatile _CUDA_VSTD::pair<_Tp, _Up>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::pair<_Tp, _Up>>
{};

template <size_t _Ip, class _Tp, class _Up>
struct tuple_element<_Ip, _CUDA_VSTD::pair<_Tp, _Up>> : _CUDA_VSTD::tuple_element<_Ip, _CUDA_VSTD::pair<_Tp, _Up>>
{};

template <size_t _Ip, class _Tp, class _Up>
struct tuple_element<_Ip, const _CUDA_VSTD::pair<_Tp, _Up>>
    : _CUDA_VSTD::tuple_element<_Ip, const _CUDA_VSTD::pair<_Tp, _Up>>
{};

template <size_t _Ip, class _Tp, class _Up>
struct tuple_element<_Ip, volatile _CUDA_VSTD::pair<_Tp, _Up>>
    : _CUDA_VSTD::tuple_element<_Ip, volatile _CUDA_VSTD::pair<_Tp, _Up>>
{};

template <size_t _Ip, class _Tp, class _Up>
struct tuple_element<_Ip, const volatile _CUDA_VSTD::pair<_Tp, _Up>>
    : _CUDA_VSTD::tuple_element<_Ip, const volatile _CUDA_VSTD::pair<_Tp, _Up>>
{};

// --- cuda::std::tuple: all four cv-qualified forms -------------------------
template <class... _Tp>
struct tuple_size<_CUDA_VSTD::tuple<_Tp...>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::tuple<_Tp...>>
{};

template <class... _Tp>
struct tuple_size<const _CUDA_VSTD::tuple<_Tp...>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::tuple<_Tp...>>
{};

template <class... _Tp>
struct tuple_size<volatile _CUDA_VSTD::tuple<_Tp...>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::tuple<_Tp...>>
{};

template <class... _Tp>
struct tuple_size<const volatile _CUDA_VSTD::tuple<_Tp...>> : _CUDA_VSTD::tuple_size<_CUDA_VSTD::tuple<_Tp...>>
{};

template <size_t _Ip, class... _Tp>
struct tuple_element<_Ip, _CUDA_VSTD::tuple<_Tp...>> : _CUDA_VSTD::tuple_element<_Ip, _CUDA_VSTD::tuple<_Tp...>>
{};

template <size_t _Ip, class... _Tp>
struct tuple_element<_Ip, const _CUDA_VSTD::tuple<_Tp...>>
    : _CUDA_VSTD::tuple_element<_Ip, const _CUDA_VSTD::tuple<_Tp...>>
{};

template <size_t _Ip, class... _Tp>
struct tuple_element<_Ip, volatile _CUDA_VSTD::tuple<_Tp...>>
    : _CUDA_VSTD::tuple_element<_Ip, volatile _CUDA_VSTD::tuple<_Tp...>>
{};

template <size_t _Ip, class... _Tp>
struct tuple_element<_Ip, const volatile _CUDA_VSTD::tuple<_Tp...>>
    : _CUDA_VSTD::tuple_element<_Ip, const volatile _CUDA_VSTD::tuple<_Tp...>>
{};
} // namespace std
#  endif // _LIBCUDACXX_STD_VER > 14

#  if defined(_LIBCUDACXX_COMPILER_CLANG)
#    pragma clang diagnostic pop
#  endif // _LIBCUDACXX_COMPILER_CLANG

#endif // __cuda_std__

#endif // _LIBCUDACXX___TUPLE_STRUCTURED_BINDINGS_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_element.h ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// tuple_element primary template, its cv-propagating partial specializations,
// and the __type_pack_element helper used to index into a parameter pack.

#ifndef _LIBCUDACXX___TUPLE_TUPLE_ELEMENT_H
#define _LIBCUDACXX___TUPLE_TUPLE_ELEMENT_H

#ifndef __cuda_std__
#  include <__config>
#endif // __cuda_std__

#include "../__tuple_dir/tuple_indices.h"
#include "../__tuple_dir/tuple_types.h"
#include "../__type_traits/add_const.h"
#include "../__type_traits/add_cv.h"
#include "../__type_traits/add_volatile.h"
#include "../cstddef"

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

_LIBCUDACXX_BEGIN_NAMESPACE_STD

// Primary template: specialized elsewhere for tuple/pair/array/__tuple_types.
template <size_t _Ip, class _Tp>
struct _LIBCUDACXX_TEMPLATE_VIS tuple_element;

template <size_t _Ip, class... _Tp>
using __tuple_element_t _LIBCUDACXX_NODEBUG_TYPE = typename tuple_element<_Ip, _Tp...>::type;

// cv-qualified tuples: look up the element of the unqualified tuple, then
// re-apply the tuple's cv-qualifiers to the element type.
template <size_t _Ip, class _Tp>
struct _LIBCUDACXX_TEMPLATE_VIS tuple_element<_Ip, const _Tp>
{
  typedef _LIBCUDACXX_NODEBUG_TYPE typename add_const<__tuple_element_t<_Ip, _Tp>>::type type;
};

template <size_t _Ip, class _Tp>
struct _LIBCUDACXX_TEMPLATE_VIS tuple_element<_Ip, volatile _Tp>
{
  typedef _LIBCUDACXX_NODEBUG_TYPE typename add_volatile<__tuple_element_t<_Ip, _Tp>>::type type;
};

template <size_t _Ip, class _Tp>
struct _LIBCUDACXX_TEMPLATE_VIS tuple_element<_Ip, const volatile _Tp>
{
  typedef _LIBCUDACXX_NODEBUG_TYPE typename add_cv<__tuple_element_t<_Ip, _Tp>>::type type;
};

#ifdef _LIBCUDACXX_COMPILER_MSVC

// MSVC fallback: plain recursive pack traversal to find the _Idx-th type.
namespace __indexer_detail {

template <size_t _Idx, class... _Types>
struct _nth_of;

// Base case: index 0 selects the head of the pack.
template <class _Head, class... _Tail>
struct _nth_of<0, _Head, _Tail...>
{
  using type = _Head;
};

// Recursive case: drop the head and decrement the index.
template <size_t _Idx, class _Head, class... _Tail>
struct _nth_of<_Idx, _Head, _Tail...>
{
  using type = typename _nth_of<_Idx - 1, _Tail...>::type;
};

template <size_t _Idx, class... _Types>
struct nth_of
{
  static_assert(_Idx < sizeof...(_Types), "");
  using _impl = _nth_of<_Idx, _Types...>;
  using type  = typename _impl::type;
};

} // namespace __indexer_detail

template <size_t _Idx, class... _Types>
using __type_pack_element _LIBCUDACXX_NODEBUG_TYPE = typename __indexer_detail::nth_of<_Idx, _Types...>::type;

#elif !__has_builtin(__type_pack_element)
// No __type_pack_element builtin: emulate it with the "indexer" technique —
// derive from one __indexed<I, T> base per pack member, then let overload
// resolution on __at_index<_Idx> pick the base whose index matches.  This is
// O(1) instantiations per lookup instead of O(N) recursion.

namespace __indexer_detail {

template <size_t _Idx, class _Tp>
struct __indexed
{
  using type _LIBCUDACXX_NODEBUG_TYPE = _Tp;
};

template <class _Types, class _Indexes>
struct __indexer;

template <class... _Types, size_t... _Idx>
struct __indexer<__tuple_types<_Types...>, __tuple_indices<_Idx...>> : __indexed<_Idx, _Types>...
{};

// Declared only; used in unevaluated context (decltype) below.
template <size_t _Idx, class _Tp>
_LIBCUDACXX_INLINE_VISIBILITY __indexed<_Idx, _Tp> __at_index(__indexed<_Idx, _Tp> const&);

} // namespace __indexer_detail

template <size_t _Idx, class... _Types>
using __type_pack_element _LIBCUDACXX_NODEBUG_TYPE =
  typename decltype(__indexer_detail::__at_index<_Idx>(__indexer_detail::__indexer< __tuple_types<_Types...>,
                                                                                    __make_tuple_indices_t<sizeof...(_Types)>>{}))::type;
#endif

template <size_t _Ip, class... _Types>
struct _LIBCUDACXX_TEMPLATE_VIS tuple_element<_Ip, __tuple_types<_Types...> >
{
  static_assert(_Ip < sizeof...(_Types), "tuple_element index out of range");
  typedef _LIBCUDACXX_NODEBUG_TYPE __type_pack_element<_Ip, _Types...> type;
};

#if _LIBCUDACXX_STD_VER > 11
template <size_t _Ip, class... _Tp>
using tuple_element_t _LIBCUDACXX_NODEBUG_TYPE = typename tuple_element<_Ip, _Tp...>::type;
#endif

_LIBCUDACXX_END_NAMESPACE_STD

#endif // _LIBCUDACXX___TUPLE_TUPLE_ELEMENT_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_indices.h ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// __make_tuple_indices: builds the index pack [_Sp, _Ep) used when expanding
// over a tuple's elements.

#ifndef _LIBCUDACXX___TUPLE_MAKE_TUPLE_INDICES_H
#define _LIBCUDACXX___TUPLE_MAKE_TUPLE_INDICES_H

#ifndef __cuda_std__
#  include <__config>
#endif // __cuda_std__

#include "../__utility/integer_sequence.h"
#include "../cstddef"

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

_LIBCUDACXX_BEGIN_NAMESPACE_STD

// _Ep is the (exclusive) end index, _Sp the (inclusive) start; defaults to a
// full [0, _Ep) range.  Delegates to __make_indices_imp from
// __utility/integer_sequence.h.
template <size_t _Ep, size_t _Sp = 0>
struct __make_tuple_indices
{
  static_assert(_Sp <= _Ep, "__make_tuple_indices input error");
  typedef __make_indices_imp<_Ep, _Sp> type;
};

template <size_t _Ep, size_t _Sp = 0>
using __make_tuple_indices_t = typename __make_tuple_indices<_Ep, _Sp>::type;

_LIBCUDACXX_END_NAMESPACE_STD

#endif // _LIBCUDACXX___TUPLE_MAKE_TUPLE_INDICES_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_like.h ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// __tuple_like<_Tp>: trait that is true_type for tuple, pair, array and the
// internal __tuple_types tag, regardless of cv-qualification.

#ifndef _LIBCUDACXX___TUPLE_TUPLE_LIKE_H
#define _LIBCUDACXX___TUPLE_TUPLE_LIKE_H

#ifndef __cuda_std__
#  include <__config>
#endif // __cuda_std__

#include "../__fwd/array.h"
#include "../__fwd/pair.h"
#include "../__fwd/tuple.h"
#include "../__tuple_dir/tuple_types.h"
#include "../__type_traits/integral_constant.h"
#include "../cstddef"

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

_LIBCUDACXX_BEGIN_NAMESPACE_STD

// Default: not tuple-like.
template <class _Tp>
struct __tuple_like : false_type
{};

// Strip cv-qualifiers by recursing into the unqualified trait.
template <class _Tp>
struct __tuple_like<const _Tp> : public __tuple_like<_Tp>
{};
template <class _Tp>
struct __tuple_like<volatile _Tp> : public __tuple_like<_Tp>
{};
template <class _Tp>
struct __tuple_like<const volatile _Tp> : public __tuple_like<_Tp>
{};

// The tuple-like family recognized by this library.
template <class... _Tp>
struct __tuple_like<tuple<_Tp...> > : true_type
{};

template <class _T1, class _T2>
struct __tuple_like<pair<_T1, _T2> > : true_type
{};

template <class _Tp, size_t _Size>
struct __tuple_like<array<_Tp, _Size> > : true_type
{};

template <class... _Tp>
struct __tuple_like<__tuple_types<_Tp...> > : true_type
{};

_LIBCUDACXX_END_NAMESPACE_STD

#endif // _LIBCUDACXX___TUPLE_TUPLE_LIKE_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_size.h ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// tuple_size primary template, SFINAE-constrained cv specializations, and the
// specializations for tuple / __tuple_types.

#ifndef _LIBCUDACXX___TUPLE_TUPLE_SIZE_H
#define _LIBCUDACXX___TUPLE_TUPLE_SIZE_H

#ifndef __cuda_std__
#  include <__config>
#endif // __cuda_std__

#include "../__fwd/tuple.h"
#include "../__tuple_dir/tuple_types.h"
#include "../__type_traits/enable_if.h"
#include "../__type_traits/integral_constant.h"
#include "../__type_traits/is_const.h"
#include "../__type_traits/is_volatile.h"
#include "../cstddef"

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

_LIBCUDACXX_BEGIN_NAMESPACE_STD

template <class _Tp>
struct _LIBCUDACXX_TEMPLATE_VIS tuple_size;

// Alias that yields _Tp only when all trailing arguments are well-formed;
// used below to gate the cv specializations.
template <class _Tp, class...>
using __enable_if_tuple_size_imp = _Tp;

// cv-qualified forms forward to the unqualified trait, but only participate
// when tuple_size<_Tp> is a complete type — `sizeof(tuple_size<_Tp>)` in the
// constraint SFINAEs away otherwise, keeping tuple_size<const _Tp> incomplete
// for non-tuple types (as the standard requires).
template <class _Tp>
struct _LIBCUDACXX_TEMPLATE_VIS tuple_size<__enable_if_tuple_size_imp< const _Tp,
  __enable_if_t<!is_volatile<_Tp>::value>, integral_constant<size_t, sizeof(tuple_size<_Tp>)>>>
    : public integral_constant<size_t, tuple_size<_Tp>::value>
{};

template <class _Tp>
struct _LIBCUDACXX_TEMPLATE_VIS tuple_size<__enable_if_tuple_size_imp< volatile _Tp,
  __enable_if_t<!is_const<_Tp>::value>, integral_constant<size_t, sizeof(tuple_size<_Tp>)>>>
    : public integral_constant<size_t, tuple_size<_Tp>::value>
{};

template <class _Tp>
struct _LIBCUDACXX_TEMPLATE_VIS
  tuple_size<__enable_if_tuple_size_imp< const volatile _Tp, integral_constant<size_t, sizeof(tuple_size<_Tp>)>>>
    : public integral_constant<size_t, tuple_size<_Tp>::value>
{};

// A tuple's size is simply the length of its type pack.
template <class... _Tp>
struct _LIBCUDACXX_TEMPLATE_VIS tuple_size<tuple<_Tp...> > : public integral_constant<size_t, sizeof...(_Tp)>
{};

template <class... _Tp>
struct _LIBCUDACXX_TEMPLATE_VIS tuple_size<__tuple_types<_Tp...> > : public integral_constant<size_t, sizeof...(_Tp)>
{};

#if _LIBCUDACXX_STD_VER >= 17
template <class _Tp>
_LIBCUDACXX_INLINE_VAR constexpr size_t tuple_size_v = tuple_size<_Tp>::value;
#endif // _LIBCUDACXX_STD_VER >= 17

_LIBCUDACXX_END_NAMESPACE_STD

#endif // _LIBCUDACXX___TUPLE_TUPLE_SIZE_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__tuple_dir/tuple_types.h ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// __tuple_types: empty tag type carrying a parameter pack; used internally as
// a lightweight stand-in for a tuple's element-type list.

#ifndef _LIBCUDACXX___TUPLE_TUPLE_TYPES_H
#define _LIBCUDACXX___TUPLE_TUPLE_TYPES_H

#ifndef __cuda_std__
#  include <__config>
#endif // __cuda_std__

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

_LIBCUDACXX_BEGIN_NAMESPACE_STD

template <class... _Tp>
struct __tuple_types
{};

_LIBCUDACXX_END_NAMESPACE_STD

#endif // _LIBCUDACXX___TUPLE_TUPLE_TYPES_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_const.h ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// add_const trait: yields `const _Tp`.

#ifndef _LIBCUDACXX___TYPE_TRAITS_ADD_CONST_H
#define _LIBCUDACXX___TYPE_TRAITS_ADD_CONST_H

#ifndef __cuda_std__
#include <__config>
#endif // __cuda_std__

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

_LIBCUDACXX_BEGIN_NAMESPACE_STD

// Note: applying `const` to a reference, function type, or already-const type
// is a no-op per the core language rules, so no special cases are needed.
template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS add_const {
  typedef _LIBCUDACXX_NODEBUG_TYPE const _Tp type;
};

#if _LIBCUDACXX_STD_VER > 11
template <class _Tp> using add_const_t = typename add_const<_Tp>::type;
#endif

_LIBCUDACXX_END_NAMESPACE_STD

#endif // _LIBCUDACXX___TYPE_TRAITS_ADD_CONST_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_cv.h ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// add_cv trait: yields `const volatile _Tp`.

#ifndef _LIBCUDACXX___TYPE_TRAITS_ADD_CV_H
#define _LIBCUDACXX___TYPE_TRAITS_ADD_CV_H

#ifndef __cuda_std__
#include <__config>
#endif // __cuda_std__

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

_LIBCUDACXX_BEGIN_NAMESPACE_STD

// Applying cv-qualifiers to references or function types is a no-op per the
// core language, so the straightforward form is fully conforming.
template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS add_cv {
  typedef _LIBCUDACXX_NODEBUG_TYPE const volatile _Tp type;
};

#if _LIBCUDACXX_STD_VER > 11
template <class _Tp> using add_cv_t = typename add_cv<_Tp>::type;
#endif

_LIBCUDACXX_END_NAMESPACE_STD

#endif // _LIBCUDACXX___TYPE_TRAITS_ADD_CV_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_lvalue_reference.h ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// add_lvalue_reference trait: yields `_Tp&` when _Tp is referenceable,
// otherwise _Tp unchanged (e.g. void, cv-qualified function types).

#ifndef _LIBCUDACXX___TYPE_TRAITS_ADD_LVALUE_REFERENCE_H
#define _LIBCUDACXX___TYPE_TRAITS_ADD_LVALUE_REFERENCE_H

#ifndef __cuda_std__
#include <__config>
#endif // __cuda_std__

#include "../__type_traits/is_referenceable.h"

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

_LIBCUDACXX_BEGIN_NAMESPACE_STD

#if defined(_LIBCUDACXX_ADD_LVALUE_REFERENCE) && !defined(_LIBCUDACXX_USE_ADD_LVALUE_REFERENCE_FALLBACK)

// Fast path: use the compiler builtin when available.
template <class _Tp>
using __add_lvalue_reference_t = _LIBCUDACXX_ADD_LVALUE_REFERENCE(_Tp);

#else

// Fallback: only append `&` when the type can legally be referenced; for
// non-referenceable types the result is _Tp itself.
template <class _Tp, bool = __libcpp_is_referenceable<_Tp>::value>
struct __add_lvalue_reference_impl {
  typedef _LIBCUDACXX_NODEBUG_TYPE _Tp type;
};
template <class _Tp >
struct __add_lvalue_reference_impl<_Tp, true> {
  typedef _LIBCUDACXX_NODEBUG_TYPE _Tp& type;
};

template <class _Tp>
using __add_lvalue_reference_t = typename __add_lvalue_reference_impl<_Tp>::type;

#endif // defined(_LIBCUDACXX_ADD_LVALUE_REFERENCE) && !defined(_LIBCUDACXX_USE_ADD_LVALUE_REFERENCE_FALLBACK)

template <class _Tp>
struct add_lvalue_reference {
  using type _LIBCUDACXX_NODEBUG_TYPE = __add_lvalue_reference_t<_Tp>;
};

#if _LIBCUDACXX_STD_VER > 11
template <class _Tp> using add_lvalue_reference_t = __add_lvalue_reference_t<_Tp>;
#endif

_LIBCUDACXX_END_NAMESPACE_STD

#endif // _LIBCUDACXX___TYPE_TRAITS_ADD_LVALUE_REFERENCE_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_pointer.h ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// add_pointer trait: yields a pointer to remove_reference_t<_Tp> when that is
// possible, otherwise _Tp unchanged (e.g. cv-qualified function types).

#ifndef _LIBCUDACXX___TYPE_TRAITS_ADD_POINTER_H
#define _LIBCUDACXX___TYPE_TRAITS_ADD_POINTER_H

#ifndef __cuda_std__
#include <__config>
#endif // __cuda_std__

#include "../__type_traits/is_referenceable.h"
#include "../__type_traits/is_same.h"
#include "../__type_traits/is_void.h"
#include "../__type_traits/remove_cv.h"
#include "../__type_traits/remove_reference.h"

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

_LIBCUDACXX_BEGIN_NAMESPACE_STD

#if defined(_LIBCUDACXX_ADD_POINTER) && !defined(_LIBCUDACXX_USE_ADD_POINTER_FALLBACK)

// Fast path: use the compiler builtin when available.
template <class _Tp>
using __add_pointer_t = _LIBCUDACXX_ADD_POINTER(_Tp);

#else
// Fallback: a pointer can be formed for referenceable types and for void
// (cv-qualified or not); anything else (cv function types) is returned as-is.
template <class _Tp,
          bool = __libcpp_is_referenceable<_Tp>::value || is_void<_Tp>::value>
struct __add_pointer_impl {
  typedef _LIBCUDACXX_NODEBUG_TYPE __libcpp_remove_reference_t<_Tp>* type;
};
template <class _Tp> struct __add_pointer_impl<_Tp, false>
  {typedef _LIBCUDACXX_NODEBUG_TYPE _Tp type;};

template <class _Tp>
using __add_pointer_t = typename __add_pointer_impl<_Tp>::type;

#endif // defined(_LIBCUDACXX_ADD_POINTER) && !defined(_LIBCUDACXX_USE_ADD_POINTER_FALLBACK)

template <class _Tp>
struct add_pointer {
  using type _LIBCUDACXX_NODEBUG_TYPE = __add_pointer_t<_Tp>;
};

#if _LIBCUDACXX_STD_VER > 11
template <class _Tp> using add_pointer_t = __add_pointer_t<_Tp>;
#endif

_LIBCUDACXX_END_NAMESPACE_STD

#endif // _LIBCUDACXX___TYPE_TRAITS_ADD_POINTER_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_rvalue_reference.h ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// add_rvalue_reference trait: yields `_Tp&&` when _Tp is referenceable,
// otherwise _Tp unchanged (e.g. void, cv-qualified function types).

#ifndef _LIBCUDACXX___TYPE_TRAITS_ADD_RVALUE_REFERENCE_H
#define _LIBCUDACXX___TYPE_TRAITS_ADD_RVALUE_REFERENCE_H

#ifndef __cuda_std__
#include <__config>
#endif // __cuda_std__

#include "../__type_traits/is_referenceable.h"

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

_LIBCUDACXX_BEGIN_NAMESPACE_STD

#if defined(_LIBCUDACXX_ADD_RVALUE_REFERENCE) && !defined(_LIBCUDACXX_USE_ADD_RVALUE_REFERENCE_FALLBACK)

// Fast path: use the compiler builtin when available.
template <class _Tp>
using __add_rvalue_reference_t = _LIBCUDACXX_ADD_RVALUE_REFERENCE(_Tp);

#else

// Fallback: only append `&&` when the type can legally be referenced; for
// non-referenceable types the result is _Tp itself.
template <class _Tp, bool = __libcpp_is_referenceable<_Tp>::value>
struct __add_rvalue_reference_impl {
  typedef _LIBCUDACXX_NODEBUG_TYPE _Tp type;
};
template <class _Tp >
struct __add_rvalue_reference_impl<_Tp, true> {
  typedef _LIBCUDACXX_NODEBUG_TYPE _Tp&& type;
};

template <class _Tp>
using __add_rvalue_reference_t = typename __add_rvalue_reference_impl<_Tp>::type;

#endif // defined(_LIBCUDACXX_ADD_RVALUE_REFERENCE) && !defined(_LIBCUDACXX_USE_ADD_RVALUE_REFERENCE_FALLBACK)

template <class _Tp>
struct add_rvalue_reference {
  using type = __add_rvalue_reference_t<_Tp>;
};

#if _LIBCUDACXX_STD_VER > 11
template <class _Tp>
using add_rvalue_reference_t = __add_rvalue_reference_t<_Tp>;
#endif

_LIBCUDACXX_END_NAMESPACE_STD

#endif // _LIBCUDACXX___TYPE_TRAITS_ADD_RVALUE_REFERENCE_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/add_volatile.h ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

// add_volatile trait: yields `volatile _Tp`.

#ifndef _LIBCUDACXX___TYPE_TRAITS_ADD_VOLATILE_H
#define _LIBCUDACXX___TYPE_TRAITS_ADD_VOLATILE_H

#ifndef __cuda_std__
#include <__config>
#endif // __cuda_std__

#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
#  pragma GCC system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
#  pragma clang system_header
#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
#  pragma system_header
#endif // no system header

_LIBCUDACXX_BEGIN_NAMESPACE_STD

// Applying `volatile` to references, function types, or already-volatile
// types is a no-op per the core language, so no special cases are needed.
template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS add_volatile {
  typedef _LIBCUDACXX_NODEBUG_TYPE volatile _Tp type;
};

#if _LIBCUDACXX_STD_VER > 11
template <class _Tp> using add_volatile_t = typename add_volatile<_Tp>::type;
#endif

_LIBCUDACXX_END_NAMESPACE_STD

#endif // _LIBCUDACXX___TYPE_TRAITS_ADD_VOLATILE_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/aligned_storage.h ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ //
7
+ //===----------------------------------------------------------------------===//
8
+
9
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ALIGNED_STORAGE_H
10
+ #define _LIBCUDACXX___TYPE_TRAITS_ALIGNED_STORAGE_H
11
+
12
+ #ifndef __cuda_std__
13
+ #include <__config>
14
+ #endif // __cuda_std__
15
+
16
+ #include "../__type_traits/conditional.h"
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/nat.h"
19
+ #include "../__type_traits/type_list.h"
20
+ #include "../cstddef"
21
+
22
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
23
+ # pragma GCC system_header
24
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
25
+ # pragma clang system_header
26
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
27
+ # pragma system_header
28
+ #endif // no system header
29
+
30
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
31
+
32
+ template <class _Tp>
33
+ struct __align_type
34
+ {
35
+ static const size_t value = _LIBCUDACXX_PREFERRED_ALIGNOF(_Tp);
36
+ typedef _Tp type;
37
+ };
38
+
39
+ struct __struct_double {long double __lx;};
40
+ struct __struct_double4 {double __lx[4];};
41
+
42
+ typedef
43
+ __type_list<__align_type<unsigned char>,
44
+ __type_list<__align_type<unsigned short>,
45
+ __type_list<__align_type<unsigned int>,
46
+ __type_list<__align_type<unsigned long>,
47
+ __type_list<__align_type<unsigned long long>,
48
+ __type_list<__align_type<double>,
49
+ __type_list<__align_type<long double>,
50
+ __type_list<__align_type<__struct_double>,
51
+ __type_list<__align_type<__struct_double4>,
52
+ __type_list<__align_type<int*>,
53
+ __nat
54
+ > > > > > > > > > > __all_types;
55
+
56
+ template <size_t _Align>
57
+ struct _ALIGNAS(_Align) __fallback_overaligned {};
58
+
59
+ template <class _TL, size_t _Align> struct __find_pod;
60
+
61
+ template <class _Hp, size_t _Align>
62
+ struct __find_pod<__type_list<_Hp, __nat>, _Align>
63
+ {
64
+ typedef __conditional_t<_Align == _Hp::value, typename _Hp::type, __fallback_overaligned<_Align> > type;
65
+ };
66
+
67
+ template <class _Hp, class _Tp, size_t _Align>
68
+ struct __find_pod<__type_list<_Hp, _Tp>, _Align>
69
+ {
70
+ typedef __conditional_t<_Align == _Hp::value, typename _Hp::type, typename __find_pod<_Tp, _Align>::type> type;
71
+ };
72
+
73
+ template <class _TL, size_t _Len> struct __find_max_align;
74
+
75
+ template <class _Hp, size_t _Len>
76
+ struct __find_max_align<__type_list<_Hp, __nat>, _Len> : public integral_constant<size_t, _Hp::value> {};
77
+
78
+ template <size_t _Len, size_t _A1, size_t _A2>
79
+ struct __select_align
80
+ {
81
+ private:
82
+ static const size_t __min = _A2 < _A1 ? _A2 : _A1;
83
+ static const size_t __max = _A1 < _A2 ? _A2 : _A1;
84
+ public:
85
+ static const size_t value = _Len < __max ? __min : __max;
86
+ };
87
+
88
+ template <class _Hp, class _Tp, size_t _Len>
89
+ struct __find_max_align<__type_list<_Hp, _Tp>, _Len>
90
+ : public integral_constant<size_t, __select_align<_Len, _Hp::value, __find_max_align<_Tp, _Len>::value>::value> {};
91
+
92
+ template <size_t _Len, size_t _Align = __find_max_align<__all_types, _Len>::value>
93
+ struct _LIBCUDACXX_TEMPLATE_VIS aligned_storage
94
+ {
95
+ typedef typename __find_pod<__all_types, _Align>::type _Aligner;
96
+ union type
97
+ {
98
+ _Aligner __align;
99
+ unsigned char __data[(_Len + _Align - 1)/_Align * _Align];
100
+ };
101
+ };
102
+
103
+ #if _LIBCUDACXX_STD_VER > 11
104
+ template <size_t _Len, size_t _Align = __find_max_align<__all_types, _Len>::value>
105
+ using aligned_storage_t = typename aligned_storage<_Len, _Align>::type;
106
+ #endif
107
+
108
+ #define _CREATE_ALIGNED_STORAGE_SPECIALIZATION(n) \
109
+ template <size_t _Len>\
110
+ struct _LIBCUDACXX_TEMPLATE_VIS aligned_storage<_Len, n>\
111
+ {\
112
+ struct _ALIGNAS(n) type\
113
+ {\
114
+ unsigned char __lx[(_Len + n - 1)/n * n];\
115
+ };\
116
+ }
117
+
118
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x1);
119
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x2);
120
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x4);
121
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x8);
122
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x10);
123
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x20);
124
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x40);
125
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x80);
126
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x100);
127
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x200);
128
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x400);
129
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x800);
130
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x1000);
131
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x2000);
132
+ // PE/COFF does not support alignment beyond 8192 (=0x2000)
133
+ #if !defined(_LIBCUDACXX_OBJECT_FORMAT_COFF)
134
+ _CREATE_ALIGNED_STORAGE_SPECIALIZATION(0x4000);
135
+ #endif // !defined(_LIBCUDACXX_OBJECT_FORMAT_COFF)
136
+
137
+ #undef _CREATE_ALIGNED_STORAGE_SPECIALIZATION
138
+
139
+ _LIBCUDACXX_END_NAMESPACE_STD
140
+
141
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ALIGNED_STORAGE_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/aligned_union.h ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ALIGNED_UNION_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_ALIGNED_UNION_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/aligned_storage.h"
18
+ #include "../__type_traits/integral_constant.h"
19
+ #include "../cstddef"
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
30
+
31
+ template <size_t _I0, size_t ..._In>
32
+ struct __static_max;
33
+
34
+ template <size_t _I0>
35
+ struct __static_max<_I0>
36
+ {
37
+ static const size_t value = _I0;
38
+ };
39
+
40
+ template <size_t _I0, size_t _I1, size_t ..._In>
41
+ struct __static_max<_I0, _I1, _In...>
42
+ {
43
+ static const size_t value = _I0 >= _I1 ? __static_max<_I0, _In...>::value :
44
+ __static_max<_I1, _In...>::value;
45
+ };
46
+
47
+ template <size_t _Len, class _Type0, class ..._Types>
48
+ struct aligned_union
49
+ {
50
+ static const size_t alignment_value = __static_max<_LIBCUDACXX_PREFERRED_ALIGNOF(_Type0),
51
+ _LIBCUDACXX_PREFERRED_ALIGNOF(_Types)...>::value;
52
+ static const size_t __len = __static_max<_Len, sizeof(_Type0),
53
+ sizeof(_Types)...>::value;
54
+ typedef typename aligned_storage<__len, alignment_value>::type type;
55
+ };
56
+
57
+ #if _LIBCUDACXX_STD_VER > 11
58
+ template <size_t _Len, class ..._Types> using aligned_union_t = typename aligned_union<_Len, _Types...>::type;
59
+ #endif
60
+
61
+ _LIBCUDACXX_END_NAMESPACE_STD
62
+
63
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ALIGNED_UNION_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/alignment_of.h ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ALIGNMENT_OF_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_ALIGNMENT_OF_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../cstddef"
19
+
20
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
21
+ # pragma GCC system_header
22
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
23
+ # pragma clang system_header
24
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
25
+ # pragma system_header
26
+ #endif // no system header
27
+
28
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
29
+
30
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS alignment_of
31
+ : public integral_constant<size_t, _LIBCUDACXX_ALIGNOF(_Tp)> {};
32
+
33
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
34
+ template <class _Tp>
35
+ _LIBCUDACXX_INLINE_VAR constexpr size_t alignment_of_v = _LIBCUDACXX_ALIGNOF(_Tp);
36
+ #endif
37
+
38
+ _LIBCUDACXX_END_NAMESPACE_STD
39
+
40
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ALIGNMENT_OF_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/apply_cv.h ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_APPLY_CV_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_APPLY_CV_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/is_const.h"
18
+ #include "../__type_traits/is_volatile.h"
19
+ #include "../__type_traits/remove_reference.h"
20
+ #include "../cstddef"
21
+
22
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
23
+ # pragma GCC system_header
24
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
25
+ # pragma clang system_header
26
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
27
+ # pragma system_header
28
+ #endif // no system header
29
+
30
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
31
+
32
+ template <class _Tp, class _Up, bool = is_const<__libcpp_remove_reference_t<_Tp> >::value,
33
+ bool = is_volatile<__libcpp_remove_reference_t<_Tp> >::value>
34
+ struct __apply_cv
35
+ {
36
+ typedef _LIBCUDACXX_NODEBUG_TYPE _Up type;
37
+ };
38
+
39
+ template <class _Tp, class _Up>
40
+ struct __apply_cv<_Tp, _Up, true, false>
41
+ {
42
+ typedef _LIBCUDACXX_NODEBUG_TYPE const _Up type;
43
+ };
44
+
45
+ template <class _Tp, class _Up>
46
+ struct __apply_cv<_Tp, _Up, false, true>
47
+ {
48
+ typedef volatile _Up type;
49
+ };
50
+
51
+ template <class _Tp, class _Up>
52
+ struct __apply_cv<_Tp, _Up, true, true>
53
+ {
54
+ typedef const volatile _Up type;
55
+ };
56
+
57
+ template <class _Tp, class _Up>
58
+ struct __apply_cv<_Tp&, _Up, false, false>
59
+ {
60
+ typedef _Up& type;
61
+ };
62
+
63
+ template <class _Tp, class _Up>
64
+ struct __apply_cv<_Tp&, _Up, true, false>
65
+ {
66
+ typedef const _Up& type;
67
+ };
68
+
69
+ template <class _Tp, class _Up>
70
+ struct __apply_cv<_Tp&, _Up, false, true>
71
+ {
72
+ typedef volatile _Up& type;
73
+ };
74
+
75
+ template <class _Tp, class _Up>
76
+ struct __apply_cv<_Tp&, _Up, true, true>
77
+ {
78
+ typedef const volatile _Up& type;
79
+ };
80
+
81
+ _LIBCUDACXX_END_NAMESPACE_STD
82
+
83
+ #endif // _LIBCUDACXX___TYPE_TRAITS_APPLY_CV_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/can_extract_key.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_CAN_EXTRACT_KEY_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_CAN_EXTRACT_KEY_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__fwd/pair.h"
18
+ #include "../__type_traits/conditional.h"
19
+ #include "../__type_traits/integral_constant.h"
20
+ #include "../__type_traits/is_same.h"
21
+ #include "../__type_traits/remove_const.h"
22
+ #include "../__type_traits/remove_const_ref.h"
23
+
24
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
25
+ # pragma GCC system_header
26
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
27
+ # pragma clang system_header
28
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
29
+ # pragma system_header
30
+ #endif // no system header
31
+
32
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
33
+
34
+ // These traits are used in __tree and __hash_table
35
+ struct __extract_key_fail_tag {};
36
+ struct __extract_key_self_tag {};
37
+ struct __extract_key_first_tag {};
38
+
39
+ template <class _ValTy, class _Key, class _RawValTy = __remove_const_ref_t<_ValTy> >
40
+ struct __can_extract_key
41
+ : __conditional_t<_IsSame<_RawValTy, _Key>::value, __extract_key_self_tag, __extract_key_fail_tag> {};
42
+
43
+ template <class _Pair, class _Key, class _First, class _Second>
44
+ struct __can_extract_key<_Pair, _Key, pair<_First, _Second> >
45
+ : __conditional_t<_IsSame<__remove_const_t<_First>, _Key>::value, __extract_key_first_tag, __extract_key_fail_tag> {
46
+ };
47
+
48
+ // __can_extract_map_key uses true_type/false_type instead of the tags.
49
+ // It returns true if _Key != _ContainerValueTy (the container is a map not a set)
50
+ // and _ValTy == _Key.
51
+ template <class _ValTy, class _Key, class _ContainerValueTy,
52
+ class _RawValTy = __remove_const_ref_t<_ValTy> >
53
+ struct __can_extract_map_key
54
+ : integral_constant<bool, _IsSame<_RawValTy, _Key>::value> {};
55
+
56
+ // This specialization returns __extract_key_fail_tag for non-map containers
57
+ // because _Key == _ContainerValueTy
58
+ template <class _ValTy, class _Key, class _RawValTy>
59
+ struct __can_extract_map_key<_ValTy, _Key, _Key, _RawValTy>
60
+ : false_type {};
61
+
62
+ _LIBCUDACXX_END_NAMESPACE_STD
63
+
64
+ #endif // _LIBCUDACXX___TYPE_TRAITS_CAN_EXTRACT_KEY_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/common_reference.h ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ // SPDX-FileCopyrightText: Copyright (c) Microsoft Corporation.
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_COMMON_REFERENCE_H
12
+ #define _LIBCUDACXX___TYPE_TRAITS_COMMON_REFERENCE_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__type_traits/common_type.h"
19
+ #include "../__type_traits/conditional.h"
20
+ #include "../__type_traits/copy_cv.h"
21
+ #include "../__type_traits/copy_cvref.h"
22
+ #include "../__type_traits/disjunction.h"
23
+ #include "../__type_traits/enable_if.h"
24
+ #include "../__type_traits/is_array.h"
25
+ #include "../__type_traits/is_convertible.h"
26
+ #include "../__type_traits/is_reference.h"
27
+ #include "../__type_traits/is_same.h"
28
+ #include "../__type_traits/is_scalar.h"
29
+ #include "../__type_traits/remove_reference.h"
30
+ #include "../__type_traits/remove_cvref.h"
31
+ #include "../__type_traits/void_t.h"
32
+ #include "../__utility/declval.h"
33
+
34
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
35
+ # pragma GCC system_header
36
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
37
+ # pragma clang system_header
38
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
39
+ # pragma system_header
40
+ #endif // no system header
41
+
42
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
43
+
44
+ // common_reference
45
+ #if _LIBCUDACXX_STD_VER > 11
46
+
47
+ // Let COND_RES(X, Y) be:
48
+ #ifdef _LIBCUDACXX_COMPILER_MSVC // Workaround for DevCom-1627396
49
+ template <class _Tp>
50
+ _Tp __returns_exactly() noexcept; // not defined
51
+
52
+ template <class _Xp, class _Yp>
53
+ using __cond_res_if_right = decltype(false ? __returns_exactly<_Xp>() : __returns_exactly<_Yp>());
54
+
55
+ template <class _Tp, class _Up, class = void>
56
+ struct __cond_res_workaround {};
57
+
58
+ template <class _Tp, class _Up>
59
+ struct __cond_res_workaround<_Tp, _Up, void_t<__cond_res_if_right<_Tp, _Up>>> {
60
+ using _RTp = remove_cvref_t<_Tp>;
61
+ using type = conditional_t<is_same_v<_RTp, remove_cvref_t<_Up>> &&
62
+ (is_scalar_v<_RTp> || is_array_v<_RTp>) &&
63
+ ((is_lvalue_reference_v<_Tp> && is_rvalue_reference_v<_Up>) || (is_rvalue_reference_v<_Tp> && is_lvalue_reference_v<_Up>)),
64
+ decay_t<__copy_cv_t<remove_reference_t<_Tp>, remove_reference_t<_Up>>>, __cond_res_if_right<_Tp, _Up>>;
65
+ };
66
+
67
+ template <class _Xp, class _Yp>
68
+ using __cond_res = typename __cond_res_workaround<_Xp, _Yp>::type;
69
+ #else // ^^^ MSVC ^^^ / vvv !MSVC vvv
70
+ template <class _Xp, class _Yp>
71
+ using __cond_res =
72
+ decltype(false ? _CUDA_VSTD::declval<_Xp(&)()>()() : _CUDA_VSTD::declval<_Yp(&)()>()());
73
+ #endif // !MSVC
74
+
75
+ // Let `XREF(A)` denote a unary alias template `T` such that `T<U>` denotes the same type as `U`
76
+ // with the addition of `A`'s cv and reference qualifiers, for a non-reference cv-unqualified type
77
+ // `U`.
78
+ // [Note: `XREF(A)` is `__xref<A>::template __apply`]
79
+ template <class _Tp>
80
+ struct __xref {
81
+ template<class _Up>
82
+ using __apply = __copy_cvref_t<_Tp, _Up>;
83
+ };
84
+
85
+ // Given types A and B, let X be remove_reference_t<A>, let Y be remove_reference_t<B>,
86
+ // and let COMMON-REF(A, B) be:
87
+ template<class _Ap, class _Bp, class = void>
88
+ struct __common_ref;
89
+
90
+ template<class _Xp, class _Yp>
91
+ using __common_ref_t = typename __common_ref<_Xp, _Yp>::__type;
92
+
93
+ template<class _Xp, class _Yp>
94
+ using __cv_cond_res = __cond_res<__copy_cv_t<_Xp, _Yp>&, __copy_cv_t<_Yp, _Xp>&>;
95
+
96
+
97
+ // If A and B are both lvalue reference types, COMMON-REF(A, B) is
98
+ // COND-RES(COPYCV(X, Y)&, COPYCV(Y, X)&) if that type exists and is a reference type.
99
+ template<class _Ap, class _Bp>
100
+ struct __common_ref<_Ap&, _Bp&, enable_if_t<is_reference_v<__cv_cond_res<_Ap, _Bp>>>>
101
+ {
102
+ using __type = __cv_cond_res<_Ap, _Bp>;
103
+ };
104
+
105
+ // Otherwise, let C be remove_reference_t<COMMON-REF(X&, Y&)>&&. ...
106
+ template <class _Xp, class _Yp>
107
+ using __common_ref_C = remove_reference_t<__common_ref_t<_Xp&, _Yp&>>&&;
108
+
109
+
110
+ // .... If A and B are both rvalue reference types, C is well-formed, and
111
+ // is_convertible_v<A, C> && is_convertible_v<B, C> is true, then COMMON-REF(A, B) is C.
112
+ template<class _Ap, class _Bp, class = void>
113
+ struct __common_ref_rr {};
114
+
115
+ template<class _Ap, class _Bp>
116
+ struct __common_ref_rr<_Ap&&, _Bp&&, enable_if_t<
117
+ is_convertible_v<_Ap&&, __common_ref_C<_Ap, _Bp>>
118
+ && is_convertible_v<_Bp&&, __common_ref_C<_Ap, _Bp>>>>
119
+ {
120
+ using __type = __common_ref_C<_Ap, _Bp>;
121
+ };
122
+
123
+ template<class _Ap, class _Bp>
124
+ struct __common_ref<_Ap&&, _Bp&&> : __common_ref_rr<_Ap&&, _Bp&&> {};
125
+
126
+ // Otherwise, let D be COMMON-REF(const X&, Y&). ...
127
+ template <class _Tp, class _Up>
128
+ using __common_ref_D = __common_ref_t<const _Tp&, _Up&>;
129
+
130
+ // ... If A is an rvalue reference and B is an lvalue reference and D is well-formed and
131
+ // is_convertible_v<A, D> is true, then COMMON-REF(A, B) is D.
132
+ template<class _Ap, class _Bp, class = void>
133
+ struct __common_ref_lr {};
134
+
135
+ template<class _Ap, class _Bp>
136
+ struct __common_ref_lr<_Ap&&, _Bp&, enable_if_t<is_convertible_v<_Ap&&, __common_ref_D<_Ap, _Bp>>>>
137
+ {
138
+ using __type = __common_ref_D<_Ap, _Bp>;
139
+ };
140
+
141
+ template<class _Ap, class _Bp>
142
+ struct __common_ref<_Ap&&, _Bp&> : __common_ref_lr<_Ap&&, _Bp&> {};
143
+
144
+ // Otherwise, if A is an lvalue reference and B is an rvalue reference, then
145
+ // COMMON-REF(A, B) is COMMON-REF(B, A).
146
+ template<class _Ap, class _Bp>
147
+ struct __common_ref<_Ap&, _Bp&&> : __common_ref_lr<_Bp&&, _Ap&> {};
148
+
149
+ // Otherwise, COMMON-REF(A, B) is ill-formed.
150
+ template<class _Ap, class _Bp, class>
151
+ struct __common_ref {};
152
+
153
+ // Note C: For the common_reference trait applied to a parameter pack [...]
154
+
155
+ template <class...>
156
+ struct common_reference;
157
+
158
+ template <class... _Types>
159
+ using common_reference_t = typename common_reference<_Types...>::type;
160
+
161
+ #if _LIBCUDACXX_STD_VER > 11
162
+ template<class, class, class = void>
163
+ _LIBCUDACXX_INLINE_VAR constexpr bool __has_common_reference = false;
164
+
165
+ template<class _Tp, class _Up>
166
+ _LIBCUDACXX_INLINE_VAR constexpr bool __has_common_reference<_Tp, _Up, void_t<common_reference_t<_Tp, _Up>>> = true;
167
+ #endif // _LIBCUDACXX_STD_VER > 11
168
+
169
+ // bullet 1 - sizeof...(T) == 0
170
+ template<>
171
+ struct common_reference<> {};
172
+
173
+ // bullet 2 - sizeof...(T) == 1
174
+ template <class _Tp>
175
+ struct common_reference<_Tp>
176
+ {
177
+ using type = _Tp;
178
+ };
179
+
180
+ // bullet 3 - sizeof...(T) == 2
181
+ template <class _Tp, class _Up, class = void> struct __common_reference_sub_bullet3;
182
+ template <class _Tp, class _Up, class = void> struct __common_reference_sub_bullet2
183
+ : __common_reference_sub_bullet3<_Tp, _Up> {};
184
+ template <class _Tp, class _Up, class = void> struct __common_reference_sub_bullet1
185
+ : __common_reference_sub_bullet2<_Tp, _Up> {};
186
+
187
+ // sub-bullet 1 - If T1 and T2 are reference types and COMMON-REF(T1, T2) is well-formed, then
188
+ // the member typedef `type` denotes that type.
189
+ template <class _Tp, class _Up> struct common_reference<_Tp, _Up> : __common_reference_sub_bullet1<_Tp, _Up> {};
190
+
191
+ template <class _Tp, class _Up>
192
+ struct __common_reference_sub_bullet1<_Tp, _Up, void_t<__common_ref_t<_Tp, _Up>,
193
+ enable_if_t<is_reference_v<_Tp> && is_reference_v<_Up>>>>
194
+ {
195
+ using type = __common_ref_t<_Tp, _Up>;
196
+ };
197
+
198
+ // sub-bullet 2 - Otherwise, if basic_common_reference<remove_cvref_t<T1>, remove_cvref_t<T2>, XREF(T1), XREF(T2)>::type
199
+ // is well-formed, then the member typedef `type` denotes that type.
200
+ template <class, class, template <class> class, template <class> class> struct basic_common_reference {};
201
+
202
+ template <class _Tp, class _Up>
203
+ using __basic_common_reference_t = typename basic_common_reference<
204
+ remove_cvref_t<_Tp>, remove_cvref_t<_Up>,
205
+ __xref<_Tp>::template __apply, __xref<_Up>::template __apply>::type;
206
+
207
+ template <class _Tp, class _Up>
208
+ struct __common_reference_sub_bullet2<_Tp, _Up, void_t<__basic_common_reference_t<_Tp, _Up>>>
209
+ {
210
+ using type = __basic_common_reference_t<_Tp, _Up>;
211
+ };
212
+
213
+ // sub-bullet 3 - Otherwise, if COND-RES(T1, T2) is well-formed,
214
+ // then the member typedef `type` denotes that type.
215
+ template <class _Tp, class _Up>
216
+ struct __common_reference_sub_bullet3<_Tp, _Up, void_t<__cond_res<_Tp, _Up>>>
217
+ {
218
+ using type = __cond_res<_Tp, _Up>;
219
+ };
220
+
221
+
222
+ // sub-bullet 4 & 5 - Otherwise, if common_type_t<T1, T2> is well-formed,
223
+ // then the member typedef `type` denotes that type.
224
+ // - Otherwise, there shall be no member `type`.
225
+ template <class _Tp, class _Up, class> struct __common_reference_sub_bullet3 : common_type<_Tp, _Up> {};
226
+
227
+ // bullet 4 - If there is such a type `C`, the member typedef type shall denote the same type, if
228
+ // any, as `common_reference_t<C, Rest...>`.
229
+ template <class _Tp, class _Up, class _Vp, class... _Rest>
230
+ struct common_reference<_Tp, _Up, _Vp, void_t<common_reference_t<_Tp, _Up>>, _Rest...>
231
+ : common_reference<common_reference_t<_Tp, _Up>, _Vp, _Rest...>
232
+ {};
233
+
234
+ // bullet 5 - Otherwise, there shall be no member `type`.
235
+ template <class...> struct common_reference {};
236
+
237
+ #endif // _LIBCUDACXX_STD_VER > 11
238
+
239
+ _LIBCUDACXX_END_NAMESPACE_STD
240
+
241
+ #endif // _LIBCUDACXX___TYPE_TRAITS_COMMON_REFERENCE_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/common_type.h ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_COMMON_TYPE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_COMMON_TYPE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/decay.h"
18
+ #include "../__type_traits/is_same.h"
19
+ #include "../__type_traits/remove_cvref.h"
20
+ #include "../__type_traits/void_t.h"
21
+ #include "../__utility/declval.h"
22
+
23
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
24
+ # pragma GCC system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
26
+ # pragma clang system_header
27
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
28
+ # pragma system_header
29
+ #endif // no system header
30
+
31
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
32
+
33
+ template <class... _Tp>
34
+ struct _LIBCUDACXX_TEMPLATE_VIS common_type;
35
+
36
+ template <class ..._Tp>
37
+ using __common_type_t = typename common_type<_Tp...>::type;
38
+
39
+ // Let COND_RES(X, Y) be:
40
+ template <class _Tp, class _Up>
41
+ using __cond_type = decltype(false ? declval<_Tp>() : declval<_Up>());
42
+
43
+ #if _LIBCUDACXX_STD_VER > 17
44
+ template <class _Tp, class _Up, class = void>
45
+ struct __common_type3 {};
46
+
47
+ // sub-bullet 4 - "if COND_RES(CREF(D1), CREF(D2)) denotes a type..."
48
+ template <class _Tp, class _Up>
49
+ struct __common_type3<_Tp, _Up, void_t<__cond_type<const _Tp&, const _Up&>>>
50
+ {
51
+ using type = remove_cvref_t<__cond_type<const _Tp&, const _Up&>>;
52
+ };
53
+
54
+ template <class _Tp, class _Up, class = void>
55
+ struct __common_type2_imp : __common_type3<_Tp, _Up> {};
56
+ #else
57
+ template <class _Tp, class _Up, class = void>
58
+ struct __common_type2_imp {};
59
+ #endif
60
+
61
+ // sub-bullet 3 - "if decay_t<decltype(false ? declval<D1>() : declval<D2>())> ..."
62
+ template <class _Tp, class _Up>
63
+ struct __common_type2_imp<_Tp, _Up, __void_t<__cond_type<_Tp, _Up>>>
64
+ {
65
+ typedef _LIBCUDACXX_NODEBUG_TYPE __decay_t<__cond_type<_Tp, _Up>> type;
66
+ };
67
+
68
+ template <class, class = void>
69
+ struct __common_type_impl {};
70
+
71
+ template <class... _Tp>
72
+ struct __common_types;
73
+
74
+ template <class _Tp, class _Up>
75
+ struct __common_type_impl<
76
+ __common_types<_Tp, _Up>, __void_t<__common_type_t<_Tp, _Up>> >
77
+ {
78
+ typedef __common_type_t<_Tp, _Up> type;
79
+ };
80
+
81
+ template <class _Tp, class _Up, class _Vp, class... _Rest>
82
+ struct __common_type_impl<__common_types<_Tp, _Up, _Vp, _Rest...>, __void_t<__common_type_t<_Tp, _Up>> >
83
+ : __common_type_impl<__common_types<__common_type_t<_Tp, _Up>, _Vp, _Rest...>> {};
84
+
85
+ // bullet 1 - sizeof...(Tp) == 0
86
+
87
+ template <>
88
+ struct _LIBCUDACXX_TEMPLATE_VIS common_type<> {};
89
+
90
+ // bullet 2 - sizeof...(Tp) == 1
91
+
92
+ template <class _Tp>
93
+ struct _LIBCUDACXX_TEMPLATE_VIS common_type<_Tp>
94
+ : public common_type<_Tp, _Tp> {};
95
+
96
+ // bullet 3 - sizeof...(Tp) == 2
97
+
98
+ // sub-bullet 1 - "If is_same_v<T1, D1> is false or ..."
99
+ template <class _Tp, class _Up, class _D1 = __decay_t<_Tp>, class _D2 = __decay_t<_Up>>
100
+ struct __common_type2 : common_type<_D1, _D2> {};
101
+
102
+ template <class _Tp, class _Up>
103
+ struct __common_type2<_Tp, _Up, _Tp, _Up> : __common_type2_imp<_Tp, _Up> {};
104
+
105
+ template <class _Tp, class _Up>
106
+ struct _LIBCUDACXX_TEMPLATE_VIS common_type<_Tp, _Up>
107
+ : __common_type2<_Tp, _Up> {};
108
+
109
+ // bullet 4 - sizeof...(Tp) > 2
110
+
111
+ template <class _Tp, class _Up, class _Vp, class... _Rest>
112
+ struct _LIBCUDACXX_TEMPLATE_VIS common_type<_Tp, _Up, _Vp, _Rest...>
113
+ : __common_type_impl<__common_types<_Tp, _Up, _Vp, _Rest...> > {};
114
+
115
+ #if _LIBCUDACXX_STD_VER > 11
116
+ template <class ..._Tp> using common_type_t = typename common_type<_Tp...>::type;
117
+
118
+ template<class, class, class = void>
119
+ _LIBCUDACXX_INLINE_VAR constexpr bool __has_common_type = false;
120
+
121
+ template<class _Tp, class _Up>
122
+ _LIBCUDACXX_INLINE_VAR constexpr bool __has_common_type<_Tp, _Up, void_t<common_type_t<_Tp, _Up>>> = true;
123
+ #endif
124
+
125
+ _LIBCUDACXX_END_NAMESPACE_STD
126
+
127
+ #endif // _LIBCUDACXX___TYPE_TRAITS_COMMON_TYPE_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/conditional.h ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_CONDITIONAL_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_CONDITIONAL_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
18
+ # pragma GCC system_header
19
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
20
+ # pragma clang system_header
21
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
22
+ # pragma system_header
23
+ #endif // no system header
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ template <bool>
28
+ struct _IfImpl;
29
+
30
+ template <>
31
+ struct _IfImpl<true> {
32
+ template <class _IfRes, class _ElseRes>
33
+ using _Select _LIBCUDACXX_NODEBUG_TYPE = _IfRes;
34
+ };
35
+
36
+ template <>
37
+ struct _IfImpl<false> {
38
+ template <class _IfRes, class _ElseRes>
39
+ using _Select _LIBCUDACXX_NODEBUG_TYPE = _ElseRes;
40
+ };
41
+
42
+ template <bool _Cond, class _IfRes, class _ElseRes>
43
+ using _If _LIBCUDACXX_NODEBUG_TYPE = typename _IfImpl<_Cond>::template _Select<_IfRes, _ElseRes>;
44
+
45
+ template <bool _Bp, class _If, class _Then>
46
+ struct _LIBCUDACXX_TEMPLATE_VIS conditional {typedef _If type;};
47
+ template <class _If, class _Then>
48
+ struct _LIBCUDACXX_TEMPLATE_VIS conditional<false, _If, _Then> {typedef _Then type;};
49
+
50
+ #if _LIBCUDACXX_STD_VER > 11
51
+ template <bool _Bp, class _IfRes, class _ElseRes>
52
+ using conditional_t = typename conditional<_Bp, _IfRes, _ElseRes>::type;
53
+ #endif
54
+
55
+ // Helper so we can use "conditional_t" in all language versions.
56
+ template <bool _Bp, class _If, class _Then> using __conditional_t = typename conditional<_Bp, _If, _Then>::type;
57
+
58
+ _LIBCUDACXX_END_NAMESPACE_STD
59
+
60
+ #endif // _LIBCUDACXX___TYPE_TRAITS_CONDITIONAL_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/conjunction.h ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_CONJUNCTION_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_CONJUNCTION_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/conditional.h"
18
+ #include "../__type_traits/enable_if.h"
19
+ #include "../__type_traits/integral_constant.h"
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
30
+
31
+ template <class...>
32
+ using __expand_to_true = true_type;
33
+
34
+ template <class... _Pred>
35
+ _LIBCUDACXX_INLINE_VISIBILITY __expand_to_true<__enable_if_t<_Pred::value>...> __and_helper(int);
36
+
37
+ template <class...>
38
+ _LIBCUDACXX_INLINE_VISIBILITY false_type __and_helper(...);
39
+
40
+ // _And always performs lazy evaluation of its arguments.
41
+ //
42
+ // However, `_And<_Pred...>` itself will evaluate its result immediately (without having to
43
+ // be instantiated) since it is an alias, unlike `conjunction<_Pred...>`, which is a struct.
44
+ // If you want to defer the evaluation of `_And<_Pred...>` itself, use `_Lazy<_And, _Pred...>`.
45
+ template <class... _Pred>
46
+ using _And _LIBCUDACXX_NODEBUG_TYPE = decltype(__and_helper<_Pred...>(0));
47
+
48
+ #if _LIBCUDACXX_STD_VER > 11
49
+
50
+ template <class...>
51
+ struct conjunction : true_type {};
52
+
53
+ template <class _Arg>
54
+ struct conjunction<_Arg> : _Arg {};
55
+
56
+ template <class _Arg, class... _Args>
57
+ struct conjunction<_Arg, _Args...> : conditional_t<!bool(_Arg::value), _Arg, conjunction<_Args...>> {};
58
+
59
+ template <class... _Args>
60
+ _LIBCUDACXX_INLINE_VAR constexpr bool conjunction_v = conjunction<_Args...>::value;
61
+
62
+ #endif // _LIBCUDACXX_STD_VER > 11
63
+
64
+ _LIBCUDACXX_END_NAMESPACE_STD
65
+
66
+ #endif // _LIBCUDACXX___TYPE_TRAITS_CONJUNCTION_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/copy_cv.h ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_COPY_CV_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_COPY_CV_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/add_const.h"
18
+ #include "../__type_traits/add_cv.h"
19
+ #include "../__type_traits/add_volatile.h"
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
30
+
31
+ // Let COPYCV(FROM, TO) be an alias for type TO with the addition of FROM's
32
+ // top-level cv-qualifiers.
33
+ template <class _From, class _To>
34
+ struct __copy_cv
35
+ {
36
+ using type = _To;
37
+ };
38
+
39
+ template <class _From, class _To>
40
+ struct __copy_cv<const _From, _To>
41
+ {
42
+ using type = typename add_const<_To>::type;
43
+ };
44
+
45
+ template <class _From, class _To>
46
+ struct __copy_cv<volatile _From, _To>
47
+ {
48
+ using type = typename add_volatile<_To>::type;
49
+ };
50
+
51
+ template <class _From, class _To>
52
+ struct __copy_cv<const volatile _From, _To>
53
+ {
54
+ using type = typename add_cv<_To>::type;
55
+ };
56
+
57
+ template <class _From, class _To>
58
+ using __copy_cv_t = typename __copy_cv<_From, _To>::type;
59
+
60
+ _LIBCUDACXX_END_NAMESPACE_STD
61
+
62
+ #endif // _LIBCUDACXX___TYPE_TRAITS_COPY_CV_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/copy_cvref.h ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_COPY_CVREF_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_COPY_CVREF_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/add_lvalue_reference.h"
18
+ #include "../__type_traits/add_rvalue_reference.h"
19
+ #include "../__type_traits/copy_cv.h"
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
30
+
31
+ template <class _From, class _To>
32
+ struct __copy_cvref
33
+ {
34
+ using type = __copy_cv_t<_From, _To>;
35
+ };
36
+
37
+ template <class _From, class _To>
38
+ struct __copy_cvref<_From&, _To>
39
+ {
40
+ using type = __add_lvalue_reference_t<__copy_cv_t<_From, _To> >;
41
+ };
42
+
43
+ template <class _From, class _To>
44
+ struct __copy_cvref<_From&&, _To>
45
+ {
46
+ using type = __add_rvalue_reference_t<__copy_cv_t<_From, _To> >;
47
+ };
48
+
49
+ template <class _From, class _To>
50
+ using __copy_cvref_t = typename __copy_cvref<_From, _To>::type;
51
+
52
+ _LIBCUDACXX_END_NAMESPACE_STD
53
+
54
+ #endif // _LIBCUDACXX___TYPE_TRAITS_COPY_CVREF_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/decay.h ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_DECAY_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_DECAY_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/add_pointer.h"
18
+ #include "../__type_traits/conditional.h"
19
+ #include "../__type_traits/is_array.h"
20
+ #include "../__type_traits/is_function.h"
21
+ #include "../__type_traits/is_referenceable.h"
22
+ #include "../__type_traits/remove_cv.h"
23
+ #include "../__type_traits/remove_extent.h"
24
+ #include "../__type_traits/remove_reference.h"
25
+
26
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
27
+ # pragma GCC system_header
28
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
29
+ # pragma clang system_header
30
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
31
+ # pragma system_header
32
+ #endif // no system header
33
+
34
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
35
+
36
+ #if defined(_LIBCUDACXX_DECAY) && !defined(_LIBCUDACXX_USE_DECAY_FALLBACK)
37
+ template <class _Tp>
38
+ struct decay {
39
+ using type _LIBCUDACXX_NODEBUG_TYPE = _LIBCUDACXX_DECAY(_Tp);
40
+ };
41
+
42
+ #else
43
+
44
+ template <class _Up, bool>
45
+ struct __decay_impl {
46
+ typedef _LIBCUDACXX_NODEBUG_TYPE __remove_cv_t<_Up> type;
47
+ };
48
+
49
+ template <class _Up>
50
+ struct __decay_impl<_Up, true> {
51
+ public:
52
+ typedef _LIBCUDACXX_NODEBUG_TYPE __conditional_t
53
+ <
54
+ is_array<_Up>::value,
55
+ __remove_extent_t<_Up>*,
56
+ __conditional_t
57
+ <
58
+ is_function<_Up>::value,
59
+ __add_pointer_t<_Up>,
60
+ __remove_cv_t<_Up>
61
+ >
62
+ > type;
63
+ };
64
+
65
+ template <class _Tp>
66
+ struct _LIBCUDACXX_TEMPLATE_VIS decay
67
+ {
68
+ private:
69
+ typedef _LIBCUDACXX_NODEBUG_TYPE __libcpp_remove_reference_t<_Tp> _Up;
70
+ public:
71
+ typedef _LIBCUDACXX_NODEBUG_TYPE typename __decay_impl<_Up, __libcpp_is_referenceable<_Up>::value>::type type;
72
+ };
73
+ #endif // defined(_LIBCUDACXX_DECAY) && !defined(_LIBCUDACXX_USE_DECAY_FALLBACK)
74
+
75
+ template <class _Tp> using __decay_t = typename decay<_Tp>::type;
76
+
77
+ #if _LIBCUDACXX_STD_VER > 11
78
+ template <class _Tp> using decay_t = typename decay<_Tp>::type;
79
+ #endif
80
+
81
+ _LIBCUDACXX_END_NAMESPACE_STD
82
+
83
+ #endif // _LIBCUDACXX___TYPE_TRAITS_DECAY_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/dependent_type.h ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_DEPENDENT_TYPE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_DEPENDENT_TYPE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
18
+ # pragma GCC system_header
19
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
20
+ # pragma clang system_header
21
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
22
+ # pragma system_header
23
+ #endif // no system header
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ template <class _Tp, bool>
28
+ struct _LIBCUDACXX_TEMPLATE_VIS __dependent_type : public _Tp {};
29
+
30
+ _LIBCUDACXX_END_NAMESPACE_STD
31
+
32
+ #endif // _LIBCUDACXX___TYPE_TRAITS_DEPENDENT_TYPE_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/disjunction.h ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_DISJUNCTION_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_DISJUNCTION_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+
19
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
20
+ # pragma GCC system_header
21
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
22
+ # pragma clang system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
24
+ # pragma system_header
25
+ #endif // no system header
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ template <bool>
30
+ struct _OrImpl;
31
+
32
+ template <>
33
+ struct _OrImpl<true> {
34
+ template <class _Res, class _First, class... _Rest>
35
+ using _Result _LIBCUDACXX_NODEBUG_TYPE =
36
+ typename _OrImpl<!bool(_First::value) && sizeof...(_Rest) != 0>::template _Result<_First, _Rest...>;
37
+ };
38
+
39
+ template <>
40
+ struct _OrImpl<false> {
41
+ template <class _Res, class...>
42
+ using _Result = _Res;
43
+ };
44
+
45
+ // _Or always performs lazy evaluation of its arguments.
46
+ //
47
+ // However, `_Or<_Pred...>` itself will evaluate its result immediately (without having to
48
+ // be instantiated) since it is an alias, unlike `disjunction<_Pred...>`, which is a struct.
49
+ // If you want to defer the evaluation of `_Or<_Pred...>` itself, use `_Lazy<_Or, _Pred...>`
50
+ // or `disjunction<_Pred...>` directly.
51
+ template <class... _Args>
52
+ using _Or _LIBCUDACXX_NODEBUG_TYPE = typename _OrImpl<sizeof...(_Args) != 0>::template _Result<false_type, _Args...>;
53
+
54
+ #if _LIBCUDACXX_STD_VER > 11
55
+
56
+ #ifdef _LIBCUDACXX_COMPILER_MSVC
57
+ template <class... _Args>
58
+ struct disjunction : false_type {};
59
+
60
+ template <class _First, class... _Rest>
61
+ struct disjunction<_First, _Rest...> : _OrImpl<true>::template _Result<false_type, _First, _Rest...> {};
62
+ #else
63
+ template <class... _Args>
64
+ struct disjunction : _Or<_Args...> {};
65
+ #endif // !MSVC
66
+
67
+ template <class... _Args>
68
+ _LIBCUDACXX_INLINE_VAR constexpr bool disjunction_v = _Or<_Args...>::value;
69
+
70
+ #endif // _LIBCUDACXX_STD_VER > 11
71
+
72
+ _LIBCUDACXX_END_NAMESPACE_STD
73
+
74
+ #endif // _LIBCUDACXX___TYPE_TRAITS_DISJUNCTION_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/enable_if.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_ENABLE_IF_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_ENABLE_IF_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
18
+ # pragma GCC system_header
19
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
20
+ # pragma clang system_header
21
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
22
+ # pragma system_header
23
+ #endif // no system header
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ template <bool, class _Tp = void> struct _LIBCUDACXX_TEMPLATE_VIS enable_if {};
28
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS enable_if<true, _Tp> {typedef _Tp type;};
29
+
30
+ template <bool _Bp, class _Tp = void> using __enable_if_t _LIBCUDACXX_NODEBUG_TYPE = typename enable_if<_Bp, _Tp>::type;
31
+
32
+ #if _LIBCUDACXX_STD_VER > 11
33
+ template <bool _Bp, class _Tp = void> using enable_if_t = typename enable_if<_Bp, _Tp>::type;
34
+ #endif
35
+
36
+ _LIBCUDACXX_END_NAMESPACE_STD
37
+
38
+ #endif // _LIBCUDACXX___TYPE_TRAITS_ENABLE_IF_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/extent.h ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //
9
+ //===----------------------------------------------------------------------===//
10
+
11
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_EXTENT_H
12
+ #define _LIBCUDACXX___TYPE_TRAITS_EXTENT_H
13
+
14
+ #ifndef __cuda_std__
15
+ #include <__config>
16
+ #endif // __cuda_std__
17
+
18
+ #include "../__type_traits/integral_constant.h"
19
+ #include "../cstddef"
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
30
+
31
+ #if defined(_LIBCUDACXX_ARRAY_EXTENT) && !defined(_LIBCUDACXX_USE_ARRAY_EXTENT_FALLBACK)
32
+
33
+ template<class _Tp, size_t _Dim = 0>
34
+ struct _LIBCUDACXX_TEMPLATE_VIS extent
35
+ : integral_constant<size_t, _LIBCUDACXX_ARRAY_EXTENT(_Tp, _Dim)> { };
36
+
37
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
38
+ template <class _Tp, unsigned _Ip = 0>
39
+ _LIBCUDACXX_INLINE_VAR constexpr size_t extent_v = _LIBCUDACXX_ARRAY_EXTENT(_Tp, _Ip);
40
+ #endif
41
+
42
+ #else
43
+
44
+ template <class _Tp, unsigned _Ip = 0> struct _LIBCUDACXX_TEMPLATE_VIS extent
45
+ : public integral_constant<size_t, 0> {};
46
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS extent<_Tp[], 0>
47
+ : public integral_constant<size_t, 0> {};
48
+ template <class _Tp, unsigned _Ip> struct _LIBCUDACXX_TEMPLATE_VIS extent<_Tp[], _Ip>
49
+ : public integral_constant<size_t, extent<_Tp, _Ip-1>::value> {};
50
+ template <class _Tp, size_t _Np> struct _LIBCUDACXX_TEMPLATE_VIS extent<_Tp[_Np], 0>
51
+ : public integral_constant<size_t, _Np> {};
52
+ template <class _Tp, size_t _Np, unsigned _Ip> struct _LIBCUDACXX_TEMPLATE_VIS extent<_Tp[_Np], _Ip>
53
+ : public integral_constant<size_t, extent<_Tp, _Ip-1>::value> {};
54
+
55
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
56
+ template <class _Tp, unsigned _Ip = 0>
57
+ _LIBCUDACXX_INLINE_VAR constexpr size_t extent_v = extent<_Tp, _Ip>::value;
58
+ #endif
59
+
60
+ #endif // defined(_LIBCUDACXX_ARRAY_EXTENT) && !defined(_LIBCUDACXX_USE_ARRAY_EXTENT_FALLBACK)
61
+
62
+ _LIBCUDACXX_END_NAMESPACE_STD
63
+
64
+ #endif // _LIBCUDACXX___TYPE_TRAITS_EXTENT_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/has_unique_object_representation.h ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_HAS_UNIQUE_OBJECT_REPRESENTATION_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_HAS_UNIQUE_OBJECT_REPRESENTATION_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/remove_all_extents.h"
19
+ #include "../__type_traits/remove_cv.h"
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
30
+
31
+ #if _LIBCUDACXX_STD_VER > 11 && defined(_LIBCUDACXX_HAS_UNIQUE_OBJECT_REPRESENTATIONS)
32
+
33
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS has_unique_object_representations
34
+ : public integral_constant<bool,
35
+ __has_unique_object_representations(remove_cv_t<remove_all_extents_t<_Tp>>)> {};
36
+
37
+ #if !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
38
+ template <class _Tp>
39
+ _LIBCUDACXX_INLINE_VAR constexpr bool has_unique_object_representations_v = has_unique_object_representations<_Tp>::value;
40
+ #endif
41
+
42
+ #endif
43
+
44
+ _LIBCUDACXX_END_NAMESPACE_STD
45
+
46
+ #endif // _LIBCUDACXX___TYPE_TRAITS_HAS_UNIQUE_OBJECT_REPRESENTATION_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/has_virtual_destructor.h ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_HAS_VIRTUAL_DESTRUCTOR_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_HAS_VIRTUAL_DESTRUCTOR_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+
19
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
20
+ # pragma GCC system_header
21
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
22
+ # pragma clang system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
24
+ # pragma system_header
25
+ #endif // no system header
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ #if defined(_LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR) && !defined(_LIBCUDACXX_USE_HAS_VIRTUAL_DESTRUCTOR_FALLBACK)
30
+
31
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS has_virtual_destructor
32
+ : public integral_constant<bool, _LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR(_Tp)> {};
33
+
34
+ #else
35
+
36
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS has_virtual_destructor
37
+ : public false_type {};
38
+
39
+ #endif // defined(_LIBCUDACXX_HAS_VIRTUAL_DESTRUCTOR) && !defined(_LIBCUDACXX_USE_HAS_VIRTUAL_DESTRUCTOR_FALLBACK)
40
+
41
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
42
+ template <class _Tp>
43
+ _LIBCUDACXX_INLINE_VAR constexpr bool has_virtual_destructor_v
44
+ = has_virtual_destructor<_Tp>::value;
45
+ #endif
46
+
47
+ _LIBCUDACXX_END_NAMESPACE_STD
48
+
49
+ #endif // _LIBCUDACXX___TYPE_TRAITS_HAS_VIRTUAL_DESTRUCTOR_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/integral_constant.h ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_INTEGRAL_CONSTANT_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_INTEGRAL_CONSTANT_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
18
+ # pragma GCC system_header
19
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
20
+ # pragma clang system_header
21
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
22
+ # pragma system_header
23
+ #endif // no system header
24
+
25
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
26
+
27
+ template <class _Tp, _Tp __v>
28
+ struct _LIBCUDACXX_TEMPLATE_VIS integral_constant
29
+ {
30
+ static constexpr const _Tp value = __v;
31
+ typedef _Tp value_type;
32
+ typedef integral_constant type;
33
+ _LIBCUDACXX_INLINE_VISIBILITY
34
+ constexpr operator value_type() const noexcept {return value;}
35
+ #if _LIBCUDACXX_STD_VER > 11
36
+ _LIBCUDACXX_INLINE_VISIBILITY
37
+ constexpr value_type operator ()() const noexcept {return value;}
38
+ #endif
39
+ };
40
+
41
+ template <class _Tp, _Tp __v>
42
+ constexpr const _Tp integral_constant<_Tp, __v>::value;
43
+
44
+ typedef integral_constant<bool, true> true_type;
45
+ typedef integral_constant<bool, false> false_type;
46
+
47
+ template <bool _Val>
48
+ using _BoolConstant _LIBCUDACXX_NODEBUG_TYPE = integral_constant<bool, _Val>;
49
+
50
+ #if _LIBCUDACXX_STD_VER > 11
51
+ template <bool __b>
52
+ using bool_constant = integral_constant<bool, __b>;
53
+ #endif
54
+
55
+ #if _LIBCUDACXX_STD_VER > 11
56
+ #define _LIBCUDACXX_BOOL_CONSTANT(__b) bool_constant<(__b)>
57
+ #else
58
+ #define _LIBCUDACXX_BOOL_CONSTANT(__b) integral_constant<bool,(__b)>
59
+ #endif
60
+
61
+ _LIBCUDACXX_END_NAMESPACE_STD
62
+
63
+ #endif // _LIBCUDACXX___TYPE_TRAITS_INTEGRAL_CONSTANT_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_abstract.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_ABSTRACT_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_ABSTRACT_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+
19
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
20
+ # pragma GCC system_header
21
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
22
+ # pragma clang system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
24
+ # pragma system_header
25
+ #endif // no system header
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_abstract
30
+ : public integral_constant<bool, __is_abstract(_Tp)> {};
31
+
32
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
33
+ template <class _Tp>
34
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_abstract_v = __is_abstract(_Tp);
35
+ #endif
36
+
37
+ _LIBCUDACXX_END_NAMESPACE_STD
38
+
39
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_ABSTRACT_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_aggregate.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_AGGREGATE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_AGGREGATE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+
19
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
20
+ # pragma GCC system_header
21
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
22
+ # pragma clang system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
24
+ # pragma system_header
25
+ #endif // no system header
26
+
27
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
28
+
29
+ #if _LIBCUDACXX_STD_VER > 11 && defined(_LIBCUDACXX_IS_AGGREGATE)
30
+
31
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS
32
+ is_aggregate : public integral_constant<bool, _LIBCUDACXX_IS_AGGREGATE(_Tp)> {};
33
+
34
+ #if !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
35
+ template <class _Tp>
36
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_aggregate_v = _LIBCUDACXX_IS_AGGREGATE(_Tp);
37
+ #endif
38
+
39
+ #endif // _LIBCUDACXX_STD_VER > 11 && defined(_LIBCUDACXX_IS_AGGREGATE)
40
+
41
+ _LIBCUDACXX_END_NAMESPACE_STD
42
+
43
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_AGGREGATE_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_allocator.h ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_IS_ALLOCATOR_H
11
+ #define _LIBCUDACXX___TYPE_IS_ALLOCATOR_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/void_t.h"
19
+ #include "../__utility/declval.h"
20
+ #include "../cstddef"
21
+
22
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
23
+ # pragma GCC system_header
24
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
25
+ # pragma clang system_header
26
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
27
+ # pragma system_header
28
+ #endif // no system header
29
+
30
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
31
+
32
+ template<typename _Alloc, typename = void, typename = void>
33
+ struct __is_allocator : false_type {};
34
+
35
+ template<typename _Alloc>
36
+ struct __is_allocator<_Alloc,
37
+ __void_t<typename _Alloc::value_type>,
38
+ __void_t<decltype(_CUDA_VSTD::declval<_Alloc&>().allocate(size_t(0)))>
39
+ >
40
+ : true_type {};
41
+
42
+ _LIBCUDACXX_END_NAMESPACE_STD
43
+
44
+ #endif // _LIBCUDACXX___TYPE_IS_ALLOCATOR_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_arithmetic.h ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_ARITHMETIC_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_ARITHMETIC_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/is_floating_point.h"
19
+ #include "../__type_traits/is_integral.h"
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
30
+
31
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_arithmetic
32
+ : public integral_constant<bool, is_integral<_Tp>::value ||
33
+ is_floating_point<_Tp>::value> {};
34
+
35
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
36
+ template <class _Tp>
37
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_arithmetic_v = is_arithmetic<_Tp>::value;
38
+ #endif
39
+
40
+ _LIBCUDACXX_END_NAMESPACE_STD
41
+
42
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_ARITHMETIC_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_array.h ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_ARRAY_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_ARRAY_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../cstddef"
19
+
20
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
21
+ # pragma GCC system_header
22
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
23
+ # pragma clang system_header
24
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
25
+ # pragma system_header
26
+ #endif // no system header
27
+
28
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
29
+
30
+ // TODO: Clang incorrectly reports that __is_array is true for T[0].
31
+ // Re-enable the branch once https://llvm.org/PR54705 is fixed.
32
+ #if defined(_LIBCUDACXX_IS_ARRAY) && !defined(_LIBCUDACXX_USE_IS_ARRAY_FALLBACK)
33
+
34
+ template <class _Tp>
35
+ struct _LIBCUDACXX_TEMPLATE_VIS is_array
36
+ : public integral_constant<bool, _LIBCUDACXX_IS_ARRAY(_Tp)>
37
+ {};
38
+
39
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
40
+ template <class _Tp>
41
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_array_v = _LIBCUDACXX_IS_ARRAY(_Tp);
42
+ #endif
43
+
44
+ #else
45
+
46
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_array
47
+ : public false_type {};
48
+ template <class _Tp> struct _LIBCUDACXX_TEMPLATE_VIS is_array<_Tp[]>
49
+ : public true_type {};
50
+ template <class _Tp, size_t _Np> struct _LIBCUDACXX_TEMPLATE_VIS is_array<_Tp[_Np]>
51
+ : public true_type {};
52
+
53
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
54
+ template <class _Tp>
55
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_array_v = is_array<_Tp>::value;
56
+ #endif
57
+
58
+ #endif // defined(_LIBCUDACXX_IS_ARRAY) && !defined(_LIBCUDACXX_USE_IS_ARRAY_FALLBACK)
59
+
60
+ _LIBCUDACXX_END_NAMESPACE_STD
61
+
62
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_ARRAY_H
miniCUDA124/include/cuda/std/detail/libcxx/include/__type_traits/is_assignable.h ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ //===----------------------------------------------------------------------===//
2
+ //
3
+ // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
+ // See https://llvm.org/LICENSE.txt for license information.
5
+ // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
+ // SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
7
+ //
8
+ //===----------------------------------------------------------------------===//
9
+
10
+ #ifndef _LIBCUDACXX___TYPE_TRAITS_IS_ASSIGNABLE_H
11
+ #define _LIBCUDACXX___TYPE_TRAITS_IS_ASSIGNABLE_H
12
+
13
+ #ifndef __cuda_std__
14
+ #include <__config>
15
+ #endif // __cuda_std__
16
+
17
+ #include "../__type_traits/integral_constant.h"
18
+ #include "../__type_traits/is_void.h"
19
+ #include "../__utility/declval.h"
20
+
21
+ #if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
22
+ # pragma GCC system_header
23
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
24
+ # pragma clang system_header
25
+ #elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
26
+ # pragma system_header
27
+ #endif // no system header
28
+
29
+ _LIBCUDACXX_BEGIN_NAMESPACE_STD
30
+
31
+ template<typename, typename _Tp> struct __select_2nd { typedef _LIBCUDACXX_NODEBUG_TYPE _Tp type; };
32
+
33
+ #if defined(_LIBCUDACXX_IS_ASSIGNABLE) && !defined(_LIBCUDACXX_USE_IS_ASSIGNABLE_FALLBACK)
34
+
35
+ template <class _T1, class _T2> struct _LIBCUDACXX_TEMPLATE_VIS is_assignable
36
+ : public integral_constant<bool, _LIBCUDACXX_IS_ASSIGNABLE(_T1, _T2)>
37
+ {};
38
+
39
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
40
+ template <class _T1, class _T2>
41
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_assignable_v = _LIBCUDACXX_IS_ASSIGNABLE(_T1, _T2);
42
+ #endif
43
+
44
+ #else
45
+
46
+ template <class _Tp, class _Arg>
47
+ _LIBCUDACXX_INLINE_VISIBILITY
48
+ typename __select_2nd<decltype((_CUDA_VSTD::declval<_Tp>() = _CUDA_VSTD::declval<_Arg>())), true_type>::type
49
+ __is_assignable_test(int);
50
+
51
+ template <class, class>
52
+ _LIBCUDACXX_INLINE_VISIBILITY
53
+ false_type __is_assignable_test(...);
54
+
55
+ template <class _Tp, class _Arg, bool = is_void<_Tp>::value || is_void<_Arg>::value>
56
+ struct __is_assignable_imp
57
+ : public decltype((_CUDA_VSTD::__is_assignable_test<_Tp, _Arg>(0))) {};
58
+
59
+ template <class _Tp, class _Arg>
60
+ struct __is_assignable_imp<_Tp, _Arg, true>
61
+ : public false_type
62
+ {
63
+ };
64
+
65
+ template <class _Tp, class _Arg>
66
+ struct is_assignable
67
+ : public __is_assignable_imp<_Tp, _Arg> {};
68
+
69
+ #if _LIBCUDACXX_STD_VER > 11 && !defined(_LIBCUDACXX_HAS_NO_VARIABLE_TEMPLATES)
70
+ template <class _Tp, class _Arg>
71
+ _LIBCUDACXX_INLINE_VAR constexpr bool is_assignable_v = is_assignable<_Tp, _Arg>::value;
72
+ #endif
73
+
74
+ #endif // defined(_LIBCUDACXX_IS_ASSIGNABLE) && !defined(_LIBCUDACXX_USE_IS_ASSIGNABLE_FALLBACK)
75
+
76
+ _LIBCUDACXX_END_NAMESPACE_STD
77
+
78
+ #endif // _LIBCUDACXX___TYPE_TRAITS_IS_ASSIGNABLE_H